"""Tests for the zero-shot object detection pipeline."""
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub so this module still imports when the vision extras are unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
"""SqueezeBERT module initialiser with lazy imports."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """Output of FlaxUNet2DConditionModel: the denoised sample tensor."""

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Conditional 2D UNet: takes a noisy sample, a timestep and encoder hidden
    states and returns a sample-shaped output."""

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads`"
                " because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
"""Calculate the apparent power in a single-phase AC circuit."""
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Return the apparent power given voltage and current magnitudes and
    their phase angles in degrees."""
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
"""Image/Text processor class for AltCLIP."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def is_int_palindrome(num: int) -> bool:
    """Return True if the integer reads the same forwards and backwards.
    Negative numbers are never palindromes."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
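
# Illustrative checks (added; the sample numbers are assumptions, not part of
# the original file): 121 reads the same in both directions, 123 does not, and
# negative numbers are rejected by definition.
assert is_int_palindrome(121) is True
assert is_int_palindrome(123) is False
assert is_int_palindrome(-121) is False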
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Project Euler problem 71: return the numerator of the fraction
    immediately to the left of numerator/denominator when all proper fractions
    with denominators up to `limit` are listed in ascending order."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return an instantiated activation module for the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
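
if __name__ == "__main__":
    # Minimal usage sketch (added; not part of the original module): resolve an
    # activation module by name and apply it element-wise to a small tensor.
    import torch

    activation = get_activation("silu")
    print(activation(torch.tensor([-1.0, 0.0, 1.0])))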
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of UNet1DModel: the denoised sample tensor."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet model that takes a noisy sample and a timestep and returns a
    sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
import numpy


class TwoHiddenLayerNeuralNetwork:
    """A feed-forward neural network with two hidden layers, trained with
    plain backpropagation."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the inputs through both hidden layers and return the
        output-layer activation."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all three weight matrices by gradient descent on the squared
        error between output_array and predicted_output."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run feedforward and backpropagation for the given number of
        iterations, optionally printing the mean squared loss."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Predict the output (0 or 1) for a single input vector."""
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of its output."""
    return (value) * (1 - (value))


def example() -> int:
    """Train the network on the three-bit truth table below and predict the
    output for the input (1, 1, 1)."""
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Tokenization classes for ESM."""
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer that splits protein sequences on whitespace."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
"""Conditional DETR model configuration."""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Conditional DETR model."""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
"""Project Euler problem 81: minimal path sum moving only right and down from
the top-left to the bottom-right cell of the grid stored in matrix.txt."""
import os


def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum through the matrix stored next to this file."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]

    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
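
# Self-contained illustration of the same dynamic-programming recurrence on an
# inline grid (added; the values below are the top-left 3x3 corner of the
# Project Euler example matrix, used here only for demonstration).
def _min_path_sum_demo() -> int:
    grid = [[131, 673, 234], [201, 96, 342], [630, 803, 746]]
    n = len(grid)
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


# The cheapest route is 131 -> 201 -> 96 -> 342 -> 746.
assert _min_path_sum_demo() == 131 + 201 + 96 + 342 + 746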
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort; only defined for sequences of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
"""BioGPT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding n (Project Euler #2)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
| 250 | 0 |
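# Hedged alternative sketch (added; the function name is hypothetical): every third
# Fibonacci number is even, so the even terms can be generated directly in O(1) memory
# using E(k) = 4*E(k-1) + E(k-2), starting from 2 and 8.
def solution_constant_memory(n: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total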
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 86 |
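# Hedged usage sketch (added): wiring the processor together by hand. The tokenizer
# checkpoint is an assumption for illustration and requires network access.
from transformers import AutoTokenizer, BlipImageProcessor

demo_image_processor = BlipImageProcessor()
demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # placeholder tokenizer
demo_qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # placeholder tokenizer
processor = InstructBlipProcessor(demo_image_processor, demo_tokenizer, demo_qformer_tokenizer)
inputs = processor(text="a photo of a cat", return_tensors="pt")
print(sorted(inputs.keys()))  # includes qformer_input_ids / qformer_attention_mask for the Q-Former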
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 86 | 1 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted VAE.")
    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 710 |
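# Hedged usage sketch (added): invoking the converter and reloading the result.
# File paths below are placeholders, not from the original script.
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers
# from diffusers import AutoencoderKL
# vae = AutoencoderKL.from_pretrained("./vae_diffusers")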
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 257 | 0 |
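# Hedged CLI sketch (added for illustration; script name and argument values are placeholders):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json --checkpoint_path ./s3prl_best.ckpt --model_dump_path ./dump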
import math


def perfect_square(num: int) -> bool:
    """Check whether num is a perfect square via math.sqrt.

    Note: this relies on floating point and can misjudge very large inputs;
    the binary-search variant below is exact.
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check whether n is a perfect square using integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 57 |
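# Hedged quick-check sketch (added): both predicates should agree on small inputs.
assert perfect_square_binary_search(16) and not perfect_square_binary_search(17)
assert all(perfect_square(i * i) for i in range(100))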
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop('feature_extractor_type')
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier' ):
            _ = AutoFeatureExtractor.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
            _ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True )
        self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, 'NewFeatureExtractor')

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True )
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(not hasattr(feature_extractor, 'is_local'))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 265 | 0 |
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain the same value, because that would mean a vertical collision. Then we
        # apply the two diagonal formulas:
        #
        # 45º: y - x = b, i.e. row - col = b
        # 135º: y + x = b, i.e. row + col = b
        #
        # and verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 703 |
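# Hedged sanity-check sketch (added; variable name is hypothetical): the 4-queens
# puzzle is known to have exactly 2 solutions.
boards4: list[list[str]] = []
depth_first_search([], [], [], boards4, 4)
assert len(boards4) == 2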
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
| 267 | 0 |
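# Hedged usage sketch (added): Prim's MST on a tiny triangle graph; values illustrative.
tiny = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
mst = tiny.prims_algorithm()
assert sum(mst.edges.values()) == 3  # the MST keeps edges (0, 1) and (1, 2)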
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 590 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
| 590 | 1 |
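# Hedged usage sketch (added; requires flax/jax, shapes are illustrative): creating the
# scheduler state and a geometric sigma schedule with the class above.
scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=10)
print(state.schedule.shape)  # (10,) — one sigma value per inference step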
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 714 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 424 | 0 |
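# Hedged illustration (added): with the lazy structure above, submodule classes are only
# materialized on first attribute access, e.g.:
# from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel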
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 419 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=2 , UpperCamelCase_=32 , UpperCamelCase_=16 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=4 , UpperCamelCase_=[0, 1, 2, 3] , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.02 , UpperCamelCase_=3 , UpperCamelCase_=[1, 384, 24, 24] , UpperCamelCase_=True , UpperCamelCase_=None , ) -> Optional[Any]:
"""simple docstring"""
a_ : Optional[int] = parent
a_ : Dict = batch_size
a_ : Tuple = image_size
a_ : Any = patch_size
a_ : Dict = num_channels
a_ : int = is_training
a_ : Optional[Any] = use_labels
a_ : int = hidden_size
a_ : str = num_hidden_layers
a_ : List[str] = backbone_out_indices
a_ : str = num_attention_heads
a_ : Union[str, Any] = intermediate_size
a_ : str = hidden_act
a_ : Any = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : Optional[int] = initializer_range
a_ : Any = num_labels
a_ : Optional[Any] = backbone_featmap_shape
a_ : str = scope
a_ : Dict = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
a_ : List[Any] = (image_size // patch_size) ** 2
a_ : Any = num_patches + 1
def A ( self ) -> Any:
"""simple docstring"""
a_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : int = None
if self.use_labels:
a_ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a_ : Dict = self.get_config()
return config, pixel_values, labels
def A ( self ) -> List[str]:
"""simple docstring"""
a_ : Optional[int] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=UpperCamelCase_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
"""simple docstring"""
a_ : Tuple = DPTModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
a_ : Optional[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
"""simple docstring"""
a_ : Tuple = self.num_labels
a_ : Optional[int] = DPTForDepthEstimation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
a_ : int = model(UpperCamelCase_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
"""simple docstring"""
a_ : List[Any] = self.num_labels
a_ : Dict = DPTForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
a_ : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self ) -> List[str]:
"""simple docstring"""
a_ : str = self.prepare_config_and_inputs()
a_ , a_ , a_ : Tuple = config_and_inputs
a_ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( __A , __A , unittest.TestCase ):
UpperCAmelCase : List[str] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCAmelCase : Optional[Any] = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase : int = False
UpperCAmelCase : str = False
UpperCAmelCase : Any = False
def A ( self ) -> Optional[Any]:
"""simple docstring"""
a_ : int = DPTModelTester(self )
a_ : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def A ( self ) -> int:
"""simple docstring"""
pass
def A ( self ) -> List[str]:
"""simple docstring"""
a_ , a_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def A ( self ) -> List[Any]:
"""simple docstring"""
a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : int = model_class(UpperCamelCase_ )
a_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : List[str] = [*signature.parameters.keys()]
a_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def A ( self ) -> List[Any]:
"""simple docstring"""
a_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def A ( self ) -> Optional[int]:
"""simple docstring"""
a_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*UpperCamelCase_ )
def A ( self ) -> List[Any]:
"""simple docstring"""
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a_ , a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Tuple = True
if model_class in get_values(UpperCamelCase_ ):
continue
a_ : Optional[int] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
a_ : Any = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
a_ : List[Any] = model(**UpperCamelCase_ ).loss
loss.backward()
def A ( self ) -> Optional[int]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Optional[Any] = False
a_ : str = True
if model_class in get_values(UpperCamelCase_ ) or not model_class.supports_gradient_checkpointing:
continue
a_ : Tuple = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.gradient_checkpointing_enable()
model.train()
a_ : Tuple = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
a_ : List[str] = model(**UpperCamelCase_ ).loss
loss.backward()
def A ( self ) -> str:
"""simple docstring"""
a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Dict = _config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
a_ : int = model_class(config=UpperCamelCase_ )
# Skip the check for the backbone
a_ : Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
a_ : Tuple = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def A ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
a_ : List[str] = DPTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def A ( self ) -> int:
"""simple docstring"""
a_ , a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
a_ : int = """add"""
with self.assertRaises(UpperCamelCase_ ):
a_ : List[str] = DPTForDepthEstimation(UpperCamelCase_ )
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class snake_case__ ( unittest.TestCase ):
def A ( self ) -> List[Any]:
"""simple docstring"""
a_ : Any = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
a_ : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(UpperCamelCase_ )
a_ : Union[str, Any] = prepare_img()
a_ : Dict = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
a_ : Optional[int] = model(**UpperCamelCase_ )
a_ : List[Any] = outputs.predicted_depth
# verify the predicted depth
a_ : Tuple = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , UpperCamelCase_ )
a_ : Any = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , UpperCamelCase_ , atol=1e-4 ) )
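# Not part of the test above: a hedged sketch of typical post-processing for a
# DPT depth map, assuming the PIL image and model outputs from the forward pass
# are bound to `image` and `outputs` (interpolate needs a 4D input, hence the
# unsqueeze):
#
#   prediction = torch.nn.functional.interpolate(
#       outputs.predicted_depth.unsqueeze(1),
#       size=image.size[::-1],
#       mode="bicubic",
#       align_corners=False,
#   ).squeeze()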
| 419 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
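# The `_LazyModule` above defers the heavy torch-backed imports until an
# attribute is first accessed. Illustrative consequence (import path assumed):
#
#   from transformers import CLIPSegConfig                 # cheap: config only
#   from transformers import CLIPSegForImageSegmentation   # pulls in torch code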
| 635 | """simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
_SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
model.to(_A)
from datasets import load_dataset
_SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""")
_SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""")
_SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A)
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Any = model(**_A)
_SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
_SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6))
self.assertEqual(logits.shape , _A)
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
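# The (1, 16) logits shape checked above matches the 16 document classes of the
# RVL-CDIP benchmark that this DiT checkpoint was fine-tuned on.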
| 635 | 1 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    '''
    Feature extraction pipeline: returns the hidden states of the base
    transformer, which can be used as features in downstream tasks.
    '''

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor, i.e. the last hidden
        # state for encoder models.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
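# Minimal usage sketch for this pipeline (checkpoint name is illustrative):
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test.")
#   # `features` is a nested list shaped [batch, sequence_length, hidden_size]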
| 5 |
'''simple docstring'''
def triangle_number_generator():
    # Triangle numbers: T(n) = n * (n + 1) / 2.
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    # Count divisors via prime factorisation: for n = p1^a1 * ... * pk^ak the
    # divisor count is (a1 + 1) * ... * (ak + 1).
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
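# Worked example for count_divisors: 28 = 2^2 * 7, so its divisor count is
# (2 + 1) * (1 + 1) = 6, namely 1, 2, 4, 7, 14, 28. solution() returns the
# first triangle number whose divisor count exceeds 500.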
| 5 | 1 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : int = '▁'
snake_case__ : Dict = {'vocab_file': 'prophetnet.tokenizer'}
snake_case__ : int = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
snake_case__ : Any = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
snake_case__ : Any = {
'microsoft/xprophetnet-large-wiki100-cased': 5_1_2,
}
def load_vocab(vocab_file):
    # Loads a vocabulary file into an ordered token -> index dictionary.
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class _a ( A__ ):
"""simple docstring"""
snake_case =VOCAB_FILES_NAMES
snake_case =PRETRAINED_VOCAB_FILES_MAP
snake_case =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case =["""input_ids""", """attention_mask"""]
def __init__( self , _snake_case , _snake_case="[SEP]" , _snake_case="[SEP]" , _snake_case="[SEP]" , _snake_case="[UNK]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
_UpperCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
_UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
_UpperCAmelCase =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
_UpperCAmelCase ={"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
_UpperCAmelCase =F"[unused{i}]"
_UpperCAmelCase =5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
_UpperCAmelCase =12
_UpperCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_snake_case )
def __getstate__( self ):
_UpperCAmelCase =self.__dict__.copy()
_UpperCAmelCase =None
return state
def __setstate__( self , _snake_case ):
_UpperCAmelCase =d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase ={}
_UpperCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None , _snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is None:
return ([0] * len(_snake_case )) + [1]
return ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ):
_UpperCAmelCase =[self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.sp_model ) + self.fairseq_offset
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ={self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase =self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
_UpperCAmelCase ="".join(_snake_case ).replace(_snake_case , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ):
if not os.path.isdir(_snake_case ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_UpperCAmelCase =os.path.join(
_snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , "wb" ) as fi:
_UpperCAmelCase =self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
_UpperCAmelCase =[self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
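# Worked example of the fairseq/spm alignment set up in __init__ above: the
# first "real" token "," sits at spm position 3; adding the fairseq offset of
# 12 (5 special tokens + 10 [unused] slots, minus the shifted spm specials)
# yields embedding position 15, as noted in the comment there.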
| 592 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict) -> None:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset" )] )
def test_split_dict_asdict_has_dataset_name(split_info) -> None:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 592 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
__UpperCAmelCase = "▁"
class lowercase_ ( a_ ):
__magic_name__ : int = VOCAB_FILES_NAMES
__magic_name__ : Dict = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Any = BigBirdTokenizer
__magic_name__ : List[str] = ["""input_ids""", """attention_mask"""]
__magic_name__ : List[int] = []
def __init__( self : Optional[Any] , _lowercase : List[Any]=None , _lowercase : Union[str, Any]=None , _lowercase : Optional[int]="<unk>" , _lowercase : Union[str, Any]="<s>" , _lowercase : Union[str, Any]="</s>" , _lowercase : int="<pad>" , _lowercase : Optional[Any]="[SEP]" , _lowercase : str="[MASK]" , _lowercase : List[str]="[CLS]" , **_lowercase : str , ):
lowerCAmelCase__ : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
lowerCAmelCase__ : Any = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
lowerCAmelCase__ : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
lowerCAmelCase__ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
lowerCAmelCase__ : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
lowerCAmelCase__ : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
lowerCAmelCase__ : Dict = vocab_file
lowerCAmelCase__ : Optional[int] = False if not self.vocab_file else True
def _lowerCAmelCase ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
lowerCAmelCase__ : Tuple = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCAmelCase ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
def _lowerCAmelCase ( self : Dict , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
lowerCAmelCase__ : Any = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCAmelCase ( self : List[str] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_lowercase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ : Any = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
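# Layout produced by build_inputs_with_special_tokens above: a single sequence
# becomes `[CLS] A [SEP]`, a pair becomes `[CLS] A [SEP] B [SEP]`, and
# create_token_type_ids_from_sequences assigns 0s to the first segment
# (including its [SEP]) and 1s to the second.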
| 308 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def lowercase__ ( lowerCamelCase : List[str]=3_2 , lowerCamelCase : Optional[Any]=1_0 , lowerCamelCase : List[str]=1_0_0 , lowerCamelCase : Optional[Any]=1_0_2_6 , lowerCamelCase : Any=True , lowerCamelCase : Optional[Any]="data/tokenized_stories_train_wikitext103.jbl" , lowerCamelCase : Union[str, Any]="igf_context_pairs.jbl" , ) -> List[str]:
set_seed(3 )
# generate train_data and objective_set
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = generate_datasets(
lowerCamelCase , lowerCamelCase , number=lowerCamelCase , min_len=1_0_2_6 , trim=lowerCamelCase )
# keeps model same across runs
set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowerCAmelCase__ : Dict = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
    lowerCAmelCase__ : List[Any] = load_gpt2("gpt2" ).to(lowerCamelCase )
print("computing perplexity on objective set" )
lowerCAmelCase__ : Union[str, Any] = compute_perplexity(lowerCamelCase , lowerCamelCase , lowerCamelCase ).item()
print("perplexity on objective set:" , lowerCamelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowercase__ ( lowerCamelCase : Tuple , lowerCamelCase : Dict=1_5 , lowerCamelCase : Tuple=1_2_8 , lowerCamelCase : Dict=1_0_0 , lowerCamelCase : Union[str, Any]="igf_model.pt" , ) -> List[Any]:
set_seed(4_2 )
# Load pre-trained model
    lowerCAmelCase__ : Any = GPT2LMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
lowerCAmelCase__ : Any = SecondaryLearner(lowerCamelCase )
# Train secondary learner
lowerCAmelCase__ : Tuple = train_secondary_learner(
lowerCamelCase , lowerCamelCase , max_epochs=lowerCamelCase , batch_size=lowerCamelCase , eval_freq=1_0_0 , igf_model_path=lowerCamelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowercase__ ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Tuple=3_2 , lowerCamelCase : Union[str, Any]=1_0_0_0 , lowerCamelCase : int=1_6 , lowerCamelCase : str=1.0 , lowerCamelCase : int=recopy_gpt2 , lowerCamelCase : Any=None , lowerCamelCase : Optional[int]=1_0 , lowerCamelCase : Dict="gpt2_finetuned.pt" , ) -> Optional[int]:
lowerCAmelCase__ : Union[str, Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase__ : Union[str, Any] = RandomSampler(lowerCamelCase )
lowerCAmelCase__ : Optional[int] = DataLoader(lowerCamelCase , sampler=lowerCamelCase )
lowerCAmelCase__ : Optional[int] = max_steps // (len(lowerCamelCase )) + 1
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Dict = torch.zeros((1, context_len) , dtype=torch.long , device=lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = recopy_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(lowerCamelCase )
secondary_learner.eval()
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Tuple = []
# Compute the performance of the transformer model at the beginning
lowerCAmelCase__ : str = compute_perplexity(lowerCamelCase , lowerCamelCase , lowerCamelCase )
test_perps.append(lowerCamelCase )
print("Test perplexity, step" , lowerCamelCase , ":" , lowerCamelCase )
for epoch in range(int(lowerCamelCase ) ):
for step, example in enumerate(lowerCamelCase ):
torch.cuda.empty_cache()
lowerCAmelCase__ : Dict = random.randint(0 , example.size(2 ) - context_len - 1 )
lowerCAmelCase__ : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowerCAmelCase__ : Optional[Any] = model(lowerCamelCase , labels=lowerCamelCase )
lowerCAmelCase__ : Tuple = True
if secondary_learner is not None:
lowerCAmelCase__ : Tuple = secondary_learner.forward(
torch.tensor(lowerCamelCase , dtype=torch.long , device=lowerCamelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(lowerCamelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 1_0:
lowerCAmelCase__ : Dict = -1
if predicted_q < threshold:
lowerCAmelCase__ : Union[str, Any] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowerCAmelCase__ : str = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowerCAmelCase__ : Dict = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowerCAmelCase__ : Dict = compute_perplexity(lowerCamelCase , lowerCamelCase , lowerCamelCase )
test_perps.append(lowerCamelCase )
print("Test perplexity, step" , lowerCamelCase , ":" , lowerCamelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict() , lowerCamelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def lowercase__ ( ) -> Optional[int]:
lowerCAmelCase__ : Any = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=lowerCamelCase , default=lowerCamelCase , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=lowerCamelCase , default=lowerCamelCase , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=lowerCamelCase , type=lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=lowerCamelCase , default=lowerCamelCase , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=3_2 , type=lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=1_0_0 , type=lowerCamelCase , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=1_0_0 , type=lowerCamelCase , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1_0_0_0 , type=lowerCamelCase , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=1_2_8 , type=lowerCamelCase , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=1_6 , type=lowerCamelCase , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=1_0 , type=lowerCamelCase , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=1_0_0 , type=lowerCamelCase , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1_0_2_6 , type=lowerCamelCase , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=1_5 , type=lowerCamelCase , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=lowerCamelCase , type=lowerCamelCase , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=lowerCamelCase , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=lowerCamelCase , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=lowerCamelCase , type=lowerCamelCase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=lowerCamelCase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
lowerCAmelCase__ : Optional[int] = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
lowerCAmelCase__ : List[Any] = training_secondary_learner(
lowerCamelCase , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
    lowerCAmelCase__ : Optional[Any] = GPT2LMHeadModel.from_pretrained("gpt2" )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = generate_datasets(
context_len=3_2 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_0_0 , min_len=1_0_2_6 , trim=lowerCamelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
lowerCamelCase , lowerCamelCase , lowerCamelCase , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=lowerCamelCase , secondary_learner=lowerCamelCase , eval_interval=1_0 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
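# End-to-end flow in main(): (1) generate (context, IG(X)) pairs from the
# objective set, (2) train the secondary learner on those pairs, then
# (3) fine-tune GPT-2 while the secondary learner filters out low-information
# batches via the decaying threshold implemented in finetune().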
| 308 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase_ (_UpperCAmelCase ):
@staticmethod
@abstractmethod
def lowerCamelCase__ ( a_ ) ->Optional[int]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def lowerCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
raise NotImplementedError()
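# A hypothetical concrete command, sketching the intended contract of the ABC
# above (conventionally named BaseTransformersCLICommand; names below are
# illustrative, not from this file):
#
#   class EnvCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           env_parser = parser.add_parser("env")
#           env_parser.set_defaults(func=lambda args: EnvCommand())
#
#       def run(self):
#           print("collect environment info here")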
| 612 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 612 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class A (__UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = """mvp"""
_SCREAMING_SNAKE_CASE = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowercase_=5_0267 , lowercase_=1024 , lowercase_=12 , lowercase_=4096 , lowercase_=16 , lowercase_=12 , lowercase_=4096 , lowercase_=16 , lowercase_=0.0 , lowercase_=0.0 , lowercase_="gelu" , lowercase_=1024 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=0.0 , lowercase_=False , lowercase_=True , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_=True , lowercase_=2 , lowercase_=2 , lowercase_=False , lowercase_=100 , lowercase_=800 , **lowercase_ , ) -> str:
'''simple docstring'''
_snake_case : int = vocab_size
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[Any] = d_model
_snake_case : Tuple = encoder_ffn_dim
_snake_case : Optional[int] = encoder_layers
_snake_case : List[Any] = encoder_attention_heads
_snake_case : List[str] = decoder_ffn_dim
_snake_case : Optional[Any] = decoder_layers
_snake_case : Union[str, Any] = decoder_attention_heads
_snake_case : Tuple = dropout
_snake_case : str = attention_dropout
_snake_case : Optional[int] = activation_dropout
_snake_case : Union[str, Any] = activation_function
_snake_case : Dict = init_std
_snake_case : Optional[int] = encoder_layerdrop
_snake_case : Dict = decoder_layerdrop
_snake_case : Any = classifier_dropout
_snake_case : List[str] = use_cache
_snake_case : List[Any] = encoder_layers
_snake_case : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case : List[str] = use_prompt
_snake_case : Any = prompt_length
_snake_case : Optional[int] = prompt_mid_dim
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , lowercase_ ):
_snake_case : Union[str, Any] = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
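# Hedged construction sketch (using the conventional class name MvpConfig for
# the config defined above; values are illustrative):
#
#   config = MvpConfig(use_prompt=True, prompt_length=100, prompt_mid_dim=800)
#   assert config.hidden_size == config.d_model   # resolved via attribute_map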
| 326 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class A (__UpperCAmelCase ):
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
'''simple docstring'''
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''' , FutureWarning , )
super().__init__(*lowercase_ , **lowercase_ )
| 326 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Dict = (DDPMScheduler,)
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        config = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = self.scheduler_classes[0]
UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
UpperCamelCase : Optional[int] = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.scheduler_classes[0]
UpperCamelCase : int = self.get_scheduler_config()
UpperCamelCase : Optional[Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
UpperCamelCase : int = self.dummy_model()
UpperCamelCase : str = self.dummy_sample_deter
UpperCamelCase : str = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase : List[str] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase : Optional[int] = pred_prev_sample
UpperCamelCase : Tuple = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : List[str] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = self.scheduler_classes[0]
UpperCamelCase : List[str] = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCamelCase : Optional[int] = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = self.dummy_model()
UpperCamelCase : str = self.dummy_sample_deter
UpperCamelCase : str = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase : str = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase : Optional[int] = pred_prev_sample
UpperCamelCase : Dict = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.scheduler_classes[0]
UpperCamelCase : Dict = self.get_scheduler_config()
UpperCamelCase : Any = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
UpperCamelCase : int = -1
else:
UpperCamelCase : List[Any] = timesteps[i + 1]
UpperCamelCase : int = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = self.scheduler_classes[0]
UpperCamelCase : Optional[Any] = self.get_scheduler_config()
UpperCamelCase : List[str] = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = self.scheduler_classes[0]
UpperCamelCase : Tuple = self.get_scheduler_config()
UpperCamelCase : Optional[Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = [100, 87, 50, 1, 0]
UpperCamelCase : Any = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.scheduler_classes[0]
UpperCamelCase : Tuple = self.get_scheduler_config()
UpperCamelCase : Any = scheduler_class(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
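# For reference, the "fixed_small" variance asserted above is the clipped DDPM
# posterior variance (Ho et al., 2020):
#   beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
# which is ~0 at t=0 and approaches beta_end (0.02 in this config) at t=999
# under the linear beta schedule.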
| 643 |
import torch
from transformers import AutoModel
class UpperCAmelCase_ ( torch.nn.Module):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
        super().__init__()
UpperCamelCase : List[str] = AutoModel.from_pretrained(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = torch.nn.CosineSimilarity(3 , 1e-08 )
UpperCamelCase : List[Any] = torch.nn.Softmax(dim=1 )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.bert(**__SCREAMING_SNAKE_CASE ).last_hidden_state
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = W_supports['''sizes'''].tolist()
UpperCamelCase : Optional[int] = W_supports['''start_token_id'''].item()
UpperCamelCase : Any = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase : Union[str, Any] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = self.BERT(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = None
UpperCamelCase : Any = None
UpperCamelCase : Optional[Any] = W_supports['''input_ids'''] == start_token_id
UpperCamelCase : Any = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(__SCREAMING_SNAKE_CASE ):
if i == 0:
UpperCamelCase : Optional[int] = 0
else:
UpperCamelCase : Tuple = support_sizes[i - 1]
UpperCamelCase : Tuple = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase : List[str] = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase : Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase : Tuple = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase : List[str] = torch.vstack((p_starts, p_start) )
UpperCamelCase : Union[str, Any] = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase : str = p_start
UpperCamelCase : Optional[int] = p_end
return p_starts, p_ends
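# Reading of the forward pass above: the support encodings are split per
# example via `sizes`; the start/end marker masks select candidate
# span-boundary embeddings from each support, and the query/support dot
# products, softmaxed over token positions, yield per-token start and end
# probabilities for few-shot entity spans.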
| 643 | 1 |
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    # Move `height` disks from `from_pole` to `to_pole`, using `with_pole` as
    # the spare peg.
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    print("moving disk from", from_pole, "to", to_pole)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
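# The recursion issues exactly 2**height - 1 moves; e.g. height 3 prints 7
# "moving disk from ... to ..." lines.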
| 184 |
'''simple docstring'''
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
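# Worked example: for -5, bin(-5)[3:] == "101" (length 3), and
# bin(5 - (1 << 3))[3:] == "11"; padding gives "1" + "0" + "11" = "1011",
# i.e. "0b1011", the 4-bit two's complement of -5.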
| 172 | 0 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    '''
    Jaccard similarity: |A intersection B| / |A union B|. With
    alternative_union=True, the denominator is |A| + |B| instead.
    '''
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
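# Expected value for the demo above: the intersection is {"c", "d", "e"}
# (size 3) and the union has 8 elements, so the script prints 0.375.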
| 609 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    '''
    A generic heap with O(1) index lookup (via `pos_map`) so that items can be
    updated or deleted in O(log n). Ordering is determined by `key`; with the
    default identity key and the `<` comparison in `_cmp`, larger scores rise
    to the top (max-heap).
    '''

    def __init__(self, key: Callable | None = None):
        # Stores the actual heap items as [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        # Returns parent index of given index if it exists, else None.
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        # Returns left-child index of given index if it exists, else None.
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        # Returns right-child index of given index if it exists, else None.
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        # Compares the two items using their scores.
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        # Returns the index that should hold the parent slot among i and its
        # children; i itself if neither child outranks it.
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index):
        # Fixes the heap in the upward direction from `index`.
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        # Fixes the heap in the downward direction from `index`.
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        # Updates the given item's value and rebalances the heap.
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        # Deletes the given item from the heap if present.
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and shrink.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        # Inserts the given item with the given value into the heap.
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        # Returns the top [item, score] pair if the heap is not empty.
        return self.arr[0] if self.size else None

    def extract_top(self):
        # Returns the top [item, score] pair and removes it from the heap.
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
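# Usage sketch for the Heap above (max-heap with the default identity key):
#
#   h = Heap()
#   h.insert_item("a", 3)
#   h.insert_item("b", 5)
#   h.get_top()          # ["b", 5]
#   h.update_item("a", 10)
#   h.extract_top()      # returns ["a", 10] and removes it; "b" is the new top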
| 609 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowercase : int = get_tests_dir("""fixtures""")
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : List[str] = mock.Mock()
a__ : Any = 500
a__ : List[Any] = {}
a__ : List[str] = HTTPError
a__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
a__ : Dict = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=lowercase) as mock_head:
a__ : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Optional[Any] = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')
def __lowercase ( self) -> int:
'''simple docstring'''
with self.assertRaises(lowercase):
# config is in subfolder, the following should not work without specifying the subfolder
a__ : Optional[int] = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')
a__ : int = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants' , subfolder='feature_extractor')
self.assertIsNotNone(lowercase)
@is_staging_test
class A__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowercase)
@classmethod
def __lowercase ( cls) -> Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-image-processor')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-image-processor')
except HTTPError:
pass
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : str = ViTImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token)
a__ : Dict = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
# Reset repo
delete_repo(token=self._token , repo_id='test-image-processor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowercase , repo_id='test-image-processor' , push_to_hub=lowercase , use_auth_token=self._token)
a__ : List[str] = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : int = ViTImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token)
a__ : Any = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-image-processor')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowercase , repo_id='valid_org/test-image-processor-org' , push_to_hub=lowercase , use_auth_token=self._token)
a__ : int = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
for k, v in image_processor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase))
def __lowercase ( self) -> List[str]:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
a__ : List[Any] = CustomImageProcessor.from_pretrained(lowercase)
image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} , )
a__ : str = AutoImageProcessor.from_pretrained(
F'{USER}/test-dynamic-image-processor' , trust_remote_code=lowercase)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor')
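# These staging tests exercise both publishing paths: calling `push_to_hub`
# directly and `save_pretrained(..., push_to_hub=True)`; in each case the
# processor reloaded with `from_pretrained` must match the pushed one
# attribute for attribute.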
| 302 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : "DiagonalGaussianDistribution"
class A__ ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__A : Union[str, Any] = True
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 4 , norm_num_groups = 32 , sample_size = 32 , scaling_factor = 0.18215 , ) -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1)
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self , module , value=False) -> None:
        if isinstance(module , (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self , use_tiling = True) -> None:
        r"""Enable tiled VAE processing so that large inputs are split into overlapping tiles."""
        self.use_tiling = use_tiling

    def disable_tiling(self) -> None:
        r"""Disable tiled VAE processing."""
        self.enable_tiling(False)

    def enable_slicing(self) -> None:
        r"""Enable sliced VAE processing so that batches are handled one image at a time."""
        self.use_slicing = True

    def disable_slicing(self) -> None:
        r"""Disable sliced VAE processing."""
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""Returns a dict of all attention processors used in the model, indexed by weight name."""
        processors = {}

        def fn_recursive_add_processors(name , module , processors):
            if hasattr(module , 'set_processor'):
                processors[F'{name}.processor'] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'{name}.{sub_name}' , child , processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors)

        return processors
    def set_attn_processor(self , processor) -> None:
        r"""Sets the attention processor to use to compute attention."""
        count = len(self.attn_processors.keys())

        if isinstance(processor , dict) and len(processor) != count:
            raise ValueError(
                F'A dict of processors was passed, but the number of processors {len(processor)} does not match the'
                F' number of attention layers: {count}. Please make sure to pass {count} processor classes.')

        def fn_recursive_attn_processor(name , module , processor):
            if hasattr(module , 'set_processor'):
                if not isinstance(processor , dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(F'{name}.processor'))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'{name}.{sub_name}' , child , processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor)

    def set_default_attn_processor(self) -> None:
        r"""Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self , x , return_dict = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self , z , return_dict = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self , z , return_dict = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self , a , b , blend_extent) -> torch.Tensor:
        # Blend the bottom rows of tile `a` into the top rows of tile `b`.
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self , a , b , blend_extent) -> torch.Tensor:
        # Blend the right columns of tile `a` into the left columns of tile `b`.
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self , x , return_dict = True) -> AutoencoderKLOutput:
        r"""Encode a batch of images with a tiled encoder: split into overlapping
        tiles, encode each tile, then blend tile boundaries to avoid seams."""
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0 , x.shape[2] , overlap_size):
            row = []
            for j in range(0 , x.shape[3] , overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row , dim=3))

        moments = torch.cat(result_rows , dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self , z , return_dict = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0 , z.shape[2] , overlap_size):
            row = []
            for j in range(0 , z.shape[3] , overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j] , tile , blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1] , tile , blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row , dim=3))

        dec = torch.cat(result_rows , dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self , sample , sample_posterior = False , return_dict = True , generator = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""Encode `sample`, draw (or take the mode of) a latent, and decode it back."""
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
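# --- Usage sketch (not part of the original file). A minimal, hedged example of how
# this VAE is typically driven once tiling is enabled: inputs larger than
# `sample_size` are routed through tiled_encode/tiled_decode automatically. The
# shapes below are illustrative, not defaults taken from the original module.
if __name__ == "__main__":
    vae = AutoencoderKL(sample_size=64)
    vae.enable_tiling()
    image = torch.randn(1, 3, 128, 128)  # larger than sample_size, so tiling kicks in
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample()
        reconstruction = vae.decode(latents).sample
    print(latents.shape, reconstruction.shape)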
| 302 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self , *args , **kwargs) -> None:
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )

    def encode(self , image: "Image" , question: str):
        return self.pre_processor(image , question , return_tensors="pt" )

    def forward(self , inputs):
        with torch.no_grad():
            return self.model(**inputs ).logits

    def decode(self , outputs):
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
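# --- Usage sketch (not part of the original file). Calling a PipelineTool runs
# encode -> forward -> decode in sequence; the first call downloads the default
# checkpoint. The image path is a hypothetical example.
if __name__ == "__main__":
    from PIL import Image as PILImage

    tool = ImageQuestionAnsweringTool()
    image = PILImage.open("photo.jpg")  # hypothetical local file
    answer = tool(image=image, question="How many cats are in the picture?")
    print(answer)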
| 276 |
'''simple docstring'''
class SubArray:
    def __init__(self , arr) -> None:
        # we need a list not a string, so do something to change the type
        self.array = arr.split(',' )

    def solve_sub_array(self) -> int:
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 276 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__lowercase : Optional[int] = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self , *args , **kwargs) -> None:
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 564 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ''''''
    b.name = ''''''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model and saves the optimized model
    next to it with an "optimized_" prefix.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''', dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print('''total reduced size: ''', total_reduced_size / 10_24 / 10_24 / 10_24, '''GB''')

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = '''optimized_''' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
| 564 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__(self , **kwargs) -> None:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    F"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    F" {positive_arg}={kwargs[positive_arg]}" )

        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript: bool = field(default=False , metadata={"help": "Trace the models using torchscript"} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    fp16_opt_level: str = field(
        default="O1" , metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
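# --- Usage sketch (not part of the original file). Building the arguments and
# inspecting the resolved device; the model name and sizes are illustrative values
# accepted by the BenchmarkArguments base class, not defaults from this file.
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], batch_sizes=[1], sequence_lengths=[8])
    print(args.device, args.n_gpu)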
| 130 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self : Any , A : str ) ->int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
lowerCamelCase__ : Any = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(A )
def __lowerCamelCase ( self : List[str] ) ->List[str]:
lowerCamelCase__ : Optional[Any] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
lowerCamelCase__ : Tuple = TensorFlowBenchmark(A )
lowerCamelCase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Dict ) ->Optional[Any]:
lowerCamelCase__ : Tuple = '''sgugger/tiny-distilbert-classification'''
lowerCamelCase__ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
lowerCamelCase__ : str = TensorFlowBenchmark(A )
lowerCamelCase__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Tuple ) ->Dict:
lowerCamelCase__ : int = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : str = TensorFlowBenchmark(A )
lowerCamelCase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Union[str, Any] ) ->Tuple:
lowerCamelCase__ : Optional[int] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(A )
lowerCamelCase__ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
lowerCamelCase__ : List[Any] = TensorFlowBenchmark(A , [config] )
lowerCamelCase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : List[Any] ) ->Any:
lowerCamelCase__ : Optional[int] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(A )
lowerCamelCase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : Optional[Any] = TensorFlowBenchmark(A , [config] )
lowerCamelCase__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Any ) ->Optional[Any]:
lowerCamelCase__ : List[str] = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : Union[str, Any] = TensorFlowBenchmark(A )
lowerCamelCase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self : Union[str, Any] ) ->Any:
lowerCamelCase__ : Tuple = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Optional[int] = AutoConfig.from_pretrained(A )
lowerCamelCase__ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : str = TensorFlowBenchmark(A , [config] )
lowerCamelCase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCamelCase ( self : Any ) ->Any:
lowerCamelCase__ : Dict = '''patrickvonplaten/t5-tiny-random'''
lowerCamelCase__ : int = AutoConfig.from_pretrained(A )
lowerCamelCase__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
lowerCamelCase__ : Dict = TensorFlowBenchmark(A , configs=[config] )
lowerCamelCase__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def __lowerCamelCase ( self : Dict ) ->Dict:
lowerCamelCase__ : Dict = '''sshleifer/tiny-gpt2'''
lowerCamelCase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=A , multi_process=A , )
lowerCamelCase__ : List[Any] = TensorFlowBenchmark(A )
lowerCamelCase__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCamelCase ( self : Any ) ->Optional[Any]:
lowerCamelCase__ : List[str] = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(A , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(A , '''env.csv''' ) , multi_process=A , )
lowerCamelCase__ : Tuple = TensorFlowBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(A , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(A , '''env.csv''' ) ).exists() )
def __lowerCamelCase ( self : Tuple ) ->Optional[int]:
lowerCamelCase__ : List[Any] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(A : int ):
self.assertTrue(hasattr(A , '''sequential''' ) )
self.assertTrue(hasattr(A , '''cumulative''' ) )
self.assertTrue(hasattr(A , '''current''' ) )
self.assertTrue(hasattr(A , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , '''log.txt''' ) , log_print=A , trace_memory_line_by_line=A , eager_mode=A , multi_process=A , )
lowerCamelCase__ : Any = TensorFlowBenchmark(A )
lowerCamelCase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(A , '''log.txt''' ) ).exists() )
| 130 | 1 |
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    """Checks whether a list of numbers forms an arithmetic series (constant
    difference between consecutive terms)."""
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(series ) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Returns the arithmetic mean of a list of numbers."""
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )

    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
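# --- Worked example (not part of the original file). For the series 2, 4, 6 the
# common difference is 2, so it is arithmetic; its mean is (2 + 4 + 6) / 3 = 4.0.
def _demo() -> None:
    assert is_arithmetic_series([2, 4, 6])
    assert not is_arithmetic_series([2, 4, 7])
    assert arithmetic_mean([2, 4, 6]) == 4.0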
| 273 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self , vertices: set[int] , edges: Mapping[EdgeT, int] ) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }

    def add_edge(self , edge: EdgeT , weight: int ) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight

    def prims_algorithm(self ) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices )} , {} )

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph


def solution(filename: str = "p107_network.txt" ) -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__ ) )
    network_file: str = os.path.join(script_dir , filename )
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file ) as f:
        data = f.read().strip().split('\n' )

    adjacency_matrix = [line.split(',' ) for line in data]
    for edge1 in range(1 , len(adjacency_matrix ) ):
        for edge2 in range(edge1 ):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2] )

    graph: Graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values() )
    optimal_total: int = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
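# --- Worked example (not part of the original file). A 3-node triangle with weights
# 1, 2 and 3: Prim's keeps the two cheapest edges (total 3), saving 3 over the full graph.
def _demo() -> None:
    g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    mst = g.prims_algorithm()
    assert sum(mst.edges.values()) == 3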
| 273 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self) -> None:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """[UNK]"""}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer(self , **kwargs):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("""Hello""" , """World""" )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["""token_type_ids"""] , expected_token_type_ids )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )

        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_2 = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )

        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )

            sequences = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding["""input_ids"""]]

            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            self.assertDictEqual(encoding.data , expected_encoding )

            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
| 588 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __a ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_A : Union[str, Any] = KandinskyInpaintPipeline
_A : Any = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
_A : Any = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
_A : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_A : int = False
@property
def __A ( self : Any ) -> Dict:
'''simple docstring'''
return 3_2
@property
def __A ( self : Optional[int] ) -> int:
'''simple docstring'''
return 3_2
@property
def __A ( self : List[Any] ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def __A ( self : Optional[int] ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __A ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return 1_0_0
@property
def __A ( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ =MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=3_7 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_0_0_5 ,)
SCREAMING_SNAKE_CASE__ =MultilingualCLIP(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =text_encoder.eval()
return text_encoder
@property
def __A ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ ={
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        SCREAMING_SNAKE_CASE__ =UNet2DConditionModel(**_UpperCamelCase )
return model
@property
def __A ( self : str ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ =VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self : int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ =self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ =self.dummy_unet
SCREAMING_SNAKE_CASE__ =self.dummy_movq
SCREAMING_SNAKE_CASE__ =DDIMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule="""linear""" ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=_UpperCamelCase ,set_alpha_to_one=_UpperCamelCase ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_UpperCamelCase ,)
SCREAMING_SNAKE_CASE__ ={
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self : Union[str, Any] ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Any=0 ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(_UpperCamelCase )
# create init_image
SCREAMING_SNAKE_CASE__ =floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =image.cpu().permute(0 ,2 ,3 ,1 )[0]
        SCREAMING_SNAKE_CASE__ =Image.fromarray(np.uint8(_UpperCamelCase ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
# create mask
        SCREAMING_SNAKE_CASE__ =np.ones((6_4, 6_4) ,dtype=np.float32 )
SCREAMING_SNAKE_CASE__ =0
if str(_UpperCamelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ =torch.manual_seed(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ =torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ ={
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def __A ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ="""cpu"""
SCREAMING_SNAKE_CASE__ =self.get_dummy_components()
SCREAMING_SNAKE_CASE__ =self.pipeline_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE__ =output.images
SCREAMING_SNAKE_CASE__ =pipe(
**self.get_dummy_inputs(_UpperCamelCase ) ,return_dict=_UpperCamelCase ,)[0]
SCREAMING_SNAKE_CASE__ =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ =image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
SCREAMING_SNAKE_CASE__ =np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __A ( self : Dict ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
SCREAMING_SNAKE_CASE__ =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        SCREAMING_SNAKE_CASE__ =np.ones((7_6_8, 7_6_8) ,dtype=np.float32 )
SCREAMING_SNAKE_CASE__ =0
SCREAMING_SNAKE_CASE__ ="""a hat"""
SCREAMING_SNAKE_CASE__ =KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" ,torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ =pipeline.to(_UpperCamelCase )
pipeline.set_progress_bar_config(disable=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =pipe_prior(
_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
SCREAMING_SNAKE_CASE__ =pipeline(
_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,image_embeds=_UpperCamelCase ,negative_image_embeds=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=1_0_0 ,height=7_6_8 ,width=7_6_8 ,output_type="""np""" ,)
SCREAMING_SNAKE_CASE__ =output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_UpperCamelCase ,_UpperCamelCase )
| 588 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = '''cvt'''

    def __init__(self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
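# --- Usage sketch (not part of the original file). Instantiating the config with its
# defaults and overriding one stage-wise hyperparameter; the values are illustrative.
if __name__ == "__main__":
    config = CvtConfig(depth=[1, 4, 16])
    print(config.model_type, config.embed_dim, config.depth)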
| 573 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCamelCase =False
class A ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ):
return 12
@property
def _UpperCAmelCase ( self ):
return 12
@property
def _UpperCAmelCase ( self ):
return 32
@property
def _UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCamelCase_ : Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def _UpperCAmelCase ( self ):
UpperCamelCase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCamelCase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__lowerCAmelCase )
@property
def _UpperCAmelCase ( self ):
torch.manual_seed(0 )
UpperCamelCase_ : Optional[Any] = 12
UpperCamelCase_ : Optional[int] = 12
UpperCamelCase_ : Optional[Any] = {
"""attention_bias""": True,
"""cross_attention_dim""": 32,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 32,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
        UpperCamelCase_ : Optional[Any] = Transformer2DModel(**__lowerCAmelCase )
return model
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = """cpu"""
UpperCamelCase_ : List[str] = self.dummy_vqvae
UpperCamelCase_ : str = self.dummy_text_encoder
UpperCamelCase_ : Optional[Any] = self.dummy_tokenizer
UpperCamelCase_ : Optional[int] = self.dummy_transformer
UpperCamelCase_ : Optional[Any] = VQDiffusionScheduler(self.num_embed )
UpperCamelCase_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__lowerCAmelCase )
UpperCamelCase_ : Tuple = VQDiffusionPipeline(
vqvae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , transformer=__lowerCAmelCase , scheduler=__lowerCAmelCase , learned_classifier_free_sampling_embeddings=__lowerCAmelCase , )
UpperCamelCase_ : Union[str, Any] = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCamelCase_ : Optional[int] = """teddy bear playing in the pool"""
UpperCamelCase_ : Union[str, Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
UpperCamelCase_ : List[Any] = pipe([prompt] , generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""np""" )
UpperCamelCase_ : Dict = output.images
UpperCamelCase_ : List[str] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
UpperCamelCase_ : Dict = pipe(
[prompt] , generator=__lowerCAmelCase , output_type="""np""" , return_dict=__lowerCAmelCase , num_inference_steps=2 )[0]
UpperCamelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase_ : List[str] = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = """cpu"""
UpperCamelCase_ : List[Any] = self.dummy_vqvae
UpperCamelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCamelCase_ : int = self.dummy_tokenizer
UpperCamelCase_ : Tuple = self.dummy_transformer
UpperCamelCase_ : Tuple = VQDiffusionScheduler(self.num_embed )
UpperCamelCase_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=__lowerCAmelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
UpperCamelCase_ : Optional[Any] = VQDiffusionPipeline(
vqvae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , transformer=__lowerCAmelCase , scheduler=__lowerCAmelCase , learned_classifier_free_sampling_embeddings=__lowerCAmelCase , )
UpperCamelCase_ : List[Any] = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCamelCase_ : Tuple = """teddy bear playing in the pool"""
UpperCamelCase_ : List[str] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
UpperCamelCase_ : Tuple = pipe([prompt] , generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""np""" )
UpperCamelCase_ : List[str] = output.images
UpperCamelCase_ : Optional[int] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
UpperCamelCase_ : List[str] = pipe(
[prompt] , generator=__lowerCAmelCase , output_type="""np""" , return_dict=__lowerCAmelCase , num_inference_steps=2 )[0]
UpperCamelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCamelCase_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase_ : List[Any] = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
UpperCamelCase_ : Optional[int] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
UpperCamelCase_ : str = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase_ : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
UpperCamelCase_ : str = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=__lowerCAmelCase , output_type="""np""" , )
UpperCamelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 208 | 0 |
from __future__ import annotations
from typing import Any
class Graph:
    """Undirected weighted graph for Borůvka's minimum spanning tree algorithm."""

    def __init__(self , num_of_nodes) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge(self , u_node , v_node , weight) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight] )

    def find_component(self , u_node) -> int:
        """Returns the root of the component a node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )

    def set_component(self , u_node) -> None:
        """Propagates a new component throughout the component map."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )

    def union(self , component_size , u_node , v_node) -> None:
        """Compares two components by size and attaches the smaller one to the
        larger one to form a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def _UpperCamelCase ():
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
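# --- Usage sketch (not part of the original file). Building a small triangle graph
# and running boruvka(); the edge weights are illustrative. The MST keeps edges
# (1, 2) and (0, 1) for a total weight of 6.
def _demo() -> None:
    g = Graph(3)
    g.add_edge(0, 1, 5)
    g.add_edge(0, 2, 10)
    g.add_edge(1, 2, 1)
    g.boruvka()  # prints the chosen edges and a total MST weight of 6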
| 548 |
import math
def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes (odd numbers only): returns all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3 , int(n**0.5 + 1) , 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3 , n , 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum of the semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
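# --- Worked example (not part of the original file). For the prime pair (5, 7), the
# window 25..49 contains multiples of 5 (30, 35, 40, 45) and of 7 (28, 35, 42); 35 is
# divisible by both, so it is added once per side and then subtracted twice, leaving
# exactly the semidivisible numbers 28, 30, 40, 42, 45.
def _demo_window() -> None:
    assert 30 + 35 + 40 + 45 + 28 + 35 + 42 - 2 * 35 == 28 + 30 + 40 + 42 + 45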
if __name__ == "__main__":
print(solution())
| 548 | 1 |
def depth_first_search(grid: list[list[int]] , row: int , col: int , visit: set) -> int:
    """Recursive backtracking that counts the paths from the top-left to the
    bottom-right of grid, moving in four directions and avoiding cells marked 1."""
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col) )

    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )

    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
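    # Illustrative check: a 2x2 open grid has exactly two unique paths from
    # the top-left corner to the bottom-right corner.
    assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2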
| 266 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
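

if __name__ == "__main__":
    # Minimal demo of the helpers above (illustrative; 32 is the standard
    # ANSI colour code for green):
    writeColor("ok", 32)
    forceWrite("\n")
    linebreak()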
| 266 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 709 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return how many ways each total can be rolled with `dice_number` dice
    of `sides_number` sides; index t of the result holds the count for total t."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) rolls a
    strictly higher total than Colin (six 6-sided dice), rounded to 7 digits."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
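    # Illustrative sanity check: with two 2-sided dice the totals 2, 3 and 4
    # occur 1, 2 and 1 times respectively.
    assert total_frequency_distribution(sides_number=2, dice_number=2) == [0, 0, 1, 2, 1]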
| 494 | 0 |
from __future__ import annotations
def all_unique(values: list[int]) -> bool:
    """Return True if every value in the list is distinct."""
    return len(set(values)) == len(values)
if __name__ == "__main__":
import doctest
doctest.testmod()
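    # Hand-checkable examples (illustrative):
    assert all_unique([1, 2, 3])
    assert not all_unique([1, 2, 2])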
| 68 |
"""simple docstring"""
import datasets
_CITATION = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
_DESCRIPTION = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
_KWARGS_DESCRIPTION = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
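

if __name__ == "__main__":
    # Usage sketch mirroring the examples in _KWARGS_DESCRIPTION above
    # (illustrative; relies on the legacy `datasets.load_metric` loader):
    xnli_metric = datasets.load_metric("xnli")
    print(xnli_metric.compute(predictions=[0, 1], references=[0, 1]))  # {'accuracy': 1.0}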
| 391 | 0 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return every prime below `limit`, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below `ceiling` that can be written as the
    sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(F'{solution() = }')
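    # Cross-check against the problem statement (illustrative): below one
    # hundred, the longest sum of consecutive primes that is itself prime is
    # 41 = 2 + 3 + 5 + 7 + 11 + 13.
    assert solution(100) == 41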
| 443 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 443 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
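

if __name__ == "__main__":
    # Minimal usage sketch for the regression helpers (illustrative only; the
    # dataloader helper additionally needs an `Accelerator` instance and the
    # MRPC csv files referenced above):
    regression_set = RegressionDataset(length=96, seed=42)
    regression_loader = DataLoader(regression_set, batch_size=16)
    regression_model = RegressionModel(a=1, b=1)
    batch = next(iter(regression_loader))
    print(regression_model(batch["x"]).shape)  # torch.Size([16]); also prints dtypes once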
| 227 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and download up to `max_images`
    full-resolution results; return the number of images downloaded."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    index = 0  # also the return value when no image URL survives the filters
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"{image_count} images were downloaded to disk.")
except IndexError:
print('''Please provide a search term.''')
raise
| 227 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint_image)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 296 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ ).write()
buffer.seek(0 )
__UpperCamelCase =load_json_function(UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert isinstance(exported_content[0] , UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 296 | 1 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Solve PV = nRT for pressure, given moles, temperature (K) and volume (m^3)."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Solve PV = nRT for volume, given moles, temperature (K) and pressure (Pa)."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
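    # Illustrative spot check: one mole at 300 K in 0.0224 m^3 (roughly the
    # molar volume at STP) gives a pressure of about 1.1e5 Pa, i.e. ~1.1 atm.
    print(pressure_of_gas_system(1.0, 300.0, 0.0224))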
| 153 |
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder on `input_1 + input_2 + carry_in`;
    an input of 2 puts the corresponding qubit into superposition."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
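# Reading the counts (illustrative; qiskit bitstrings are ordered cr[1]cr[0],
# i.e. "carry sum"): definite inputs make the circuit deterministic, so
# quantum_full_adder(1, 0, 1) should return {'10': 1000} - sum 0, carry 1.
# An input of 2 applies a Hadamard to that qubit, spreading the shots over
# several outcomes instead.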
| 153 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Point every input of ``node_proto`` that matches ``name`` at ``new_name``."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node of the (sub)graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicated initializers and rewire the graph onto the kept copies."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Find byte-identical initializers in an ONNX file, keep one copy of each,
    and save the slimmed model next to the original."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
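# Minimal usage sketch (the .onnx path below is hypothetical):
#
#     optimized_path = remove_dup_initializers("bart/model.onnx")
#     # writes "bart/optimized_model.onnx" and returns its path
#
# Only graph references are rewired: for each duplicate pair (j, i) with j > i
# the lower-index tensor is kept and every input that pointed at the removed
# copy is renamed onto it.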
| 703 |
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal to the ellipse at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
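# Geometry behind next_point (a short note, not in the original source): on the
# ellipse 4x^2 + y^2 = 100 implicit differentiation gives tangent slope -4x/y,
# hence normal slope m = y/(4x). Using sin(2t) = 2m/(1 + m^2) and
# cos(2t) = (1 - m^2)/(1 + m^2) reflects the incoming slope about that normal
# in one step, and substituting the outgoing line into the ellipse equation
# yields the quadratic solved above, one of whose two roots is the current point.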
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Count reflections of the laser beam before it escapes through the gap."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    incoming_gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, incoming_gradient = next_point(point_x, point_y, incoming_gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 202 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class FlaxPipelineDownloadTests(unittest.TestCase):
    def test_loading_flax_pipeline_downloads_no_pytorch_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], 'snapshots'))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class FlaxPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case , _snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_snake_case : Any = jax.random.PRNGKey(0 )
_snake_case : str = 4
_snake_case : Optional[int] = jax.device_count()
_snake_case : Union[str, Any] = num_samples * [prompt]
_snake_case : Union[str, Any] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
_snake_case : Union[str, Any] = replicate(lowerCamelCase_ )
_snake_case : List[Any] = jax.random.split(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Tuple = shard(lowerCamelCase_ )
_snake_case : int = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5e-1
_snake_case : Optional[int] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=lowerCamelCase_ )
_snake_case : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_snake_case : Optional[int] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = 50
_snake_case : Any = jax.device_count()
_snake_case : str = num_samples * [prompt]
_snake_case : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
_snake_case : List[str] = replicate(lowerCamelCase_ )
_snake_case : Optional[int] = jax.random.split(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Any = shard(lowerCamelCase_ )
_snake_case : List[Any] = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5e-1
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ )
_snake_case : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_snake_case : Tuple = jax.random.PRNGKey(0 )
_snake_case : Dict = 50
_snake_case : Any = jax.device_count()
_snake_case : List[str] = num_samples * [prompt]
_snake_case : Optional[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
_snake_case : Optional[Any] = replicate(lowerCamelCase_ )
_snake_case : Dict = jax.random.split(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[int] = shard(lowerCamelCase_ )
_snake_case : List[Any] = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_snake_case : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_snake_case : List[str] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = 50
_snake_case : List[str] = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
_snake_case : Dict = replicate(lowerCamelCase_ )
_snake_case : List[Any] = jax.random.split(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Dict = shard(lowerCamelCase_ )
_snake_case : List[Any] = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : str = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , )
_snake_case , _snake_case : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , )
_snake_case : Optional[int] = scheduler.create_state()
_snake_case : Union[str, Any] = scheduler_state
_snake_case : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_snake_case : Optional[int] = jax.random.PRNGKey(0 )
_snake_case : List[str] = 50
_snake_case : List[str] = jax.device_count()
_snake_case : str = num_samples * [prompt]
_snake_case : Tuple = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
_snake_case : Optional[int] = replicate(lowerCamelCase_ )
_snake_case : Optional[int] = jax.random.split(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : int = shard(lowerCamelCase_ )
_snake_case : List[Any] = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5e-1
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_snake_case : Optional[Any] = jax.device_count()
_snake_case : Any = num_samples * [prompt]
_snake_case : Tuple = jax.random.split(jax.random.PRNGKey(0 ) , lowerCamelCase_ )
_snake_case , _snake_case : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ , )
_snake_case : Optional[Any] = replicate(lowerCamelCase_ )
_snake_case : Optional[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
_snake_case : List[Any] = shard(lowerCamelCase_ )
_snake_case : Dict = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
_snake_case : int = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
_snake_case , _snake_case : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ , use_memory_efficient_attention=lowerCamelCase_ , )
_snake_case : List[Any] = replicate(lowerCamelCase_ )
_snake_case : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
_snake_case : List[Any] = shard(lowerCamelCase_ )
_snake_case : Optional[int] = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
_snake_case : Optional[Any] = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 304 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
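# Minimal smoke test for the helpers above (illustrative only, using the
# renamed RegressionDataset / RegressionModel definitions in this file):
#
#     dataset = RegressionDataset(a=2, b=3, length=8, seed=42)
#     loader = DataLoader(dataset, batch_size=4)
#     model = RegressionModel(a=0, b=0)
#     batch = next(iter(loader))
#     preds = model(batch["x"])   # prints the dtypes once, on the first batch
#     assert preds.shape == batch["y"].shape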
| 304 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/swin-tiny-patch4-window7-224''': (
        '''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''swin'''

    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
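# Usage sketch (hypothetical values): exposing intermediate stages so the
# config can drive a backbone downstream:
#
#     config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
#     # config.stage_names  -> ["stem", "stage1", "stage2", "stage3", "stage4"]
#     # config._out_indices -> [2, 4], aligned with the requested out_features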
| 711 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ])
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ])
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 79 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(k) for 2 <= k <= limit via a sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
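# Spot check (not in the original file): the sieve leaves phi[k] equal to
# Euler's totient, so solution(10) = phi(2)+...+phi(10)
# = 1+2+2+4+2+6+4+6+4 = 31, the number of reduced proper fractions with
# denominator <= 10.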
| 79 | 1 |
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 720 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'maskformer-swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 80 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        } , )
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        default=None , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    language: str = field(
        default=None , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
    train_language: Optional[str] = field(
        default=None , metadata={"""help""": """Train language if it is different from the evaluation language."""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    do_lower_case: Optional[bool] = field(
        default=False , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def main() -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_xnli""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCAmelCase: Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__UpperCAmelCase: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase: Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
__UpperCAmelCase: Optional[int] = load_dataset(
"""xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
__UpperCAmelCase: Tuple = load_dataset(
"""xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase: Dict = train_dataset.features["""label"""].names
if training_args.do_eval:
__UpperCAmelCase: List[str] = load_dataset(
"""xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase: Dict = eval_dataset.features["""label"""].names
if training_args.do_predict:
__UpperCAmelCase: str = load_dataset(
"""xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase: List[str] = predict_dataset.features["""label"""].names
# Labels
__UpperCAmelCase: List[Any] = len(_lowercase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase: str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel={str(_lowercase ): label for i, label in enumerate(_lowercase )} , labelaid={label: i for i, label in enumerate(_lowercase )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase: Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase: Tuple = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
__UpperCAmelCase: str = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__UpperCAmelCase: str = False
def preprocess_function(_lowercase : Any ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] , examples["""hypothesis"""] , padding=_lowercase , max_length=data_args.max_seq_length , truncation=_lowercase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
__UpperCAmelCase: Union[str, Any] = min(len(_lowercase ) , data_args.max_train_samples )
__UpperCAmelCase: int = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__UpperCAmelCase: int = train_dataset.map(
_lowercase , batched=_lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(_lowercase ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__UpperCAmelCase: Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
__UpperCAmelCase: str = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__UpperCAmelCase: Any = eval_dataset.map(
_lowercase , batched=_lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
__UpperCAmelCase: Optional[int] = min(len(_lowercase ) , data_args.max_predict_samples )
__UpperCAmelCase: str = predict_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
__UpperCAmelCase: List[Any] = predict_dataset.map(
_lowercase , batched=_lowercase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
# Get the metric function
__UpperCAmelCase: Union[str, Any] = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowercase : EvalPrediction ):
__UpperCAmelCase: str = p.predictions[0] if isinstance(p.predictions , _lowercase ) else p.predictions
__UpperCAmelCase: Any = np.argmax(_lowercase , axis=1 )
return metric.compute(predictions=_lowercase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__UpperCAmelCase: Tuple = default_data_collator
elif training_args.fpaa:
__UpperCAmelCase: Optional[Any] = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 )
else:
__UpperCAmelCase: Dict = None
# Initialize our Trainer
__UpperCAmelCase: Dict = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowercase , tokenizer=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
__UpperCAmelCase: Dict = None
if training_args.resume_from_checkpoint is not None:
__UpperCAmelCase: Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCAmelCase: Optional[int] = last_checkpoint
__UpperCAmelCase: Any = trainer.train(resume_from_checkpoint=_lowercase )
__UpperCAmelCase: Optional[Any] = train_result.metrics
__UpperCAmelCase: Any = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
__UpperCAmelCase: str = min(_lowercase , len(_lowercase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , _lowercase )
trainer.save_metrics("""train""" , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__UpperCAmelCase: int = trainer.evaluate(eval_dataset=_lowercase )
__UpperCAmelCase: str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
__UpperCAmelCase: Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics("""eval""" , _lowercase )
trainer.save_metrics("""eval""" , _lowercase )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase: List[Any] = trainer.predict(_lowercase , metric_key_prefix="""predict""" )
__UpperCAmelCase: List[Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowercase )
)
__UpperCAmelCase: List[str] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics("""predict""" , _lowercase )
trainer.save_metrics("""predict""" , _lowercase )
__UpperCAmelCase: int = np.argmax(_lowercase , axis=1 )
__UpperCAmelCase: str = os.path.join(training_args.output_dir , """predictions.txt""" )
if trainer.is_world_process_zero():
with open(_lowercase , """w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(_lowercase ):
__UpperCAmelCase: Union[str, Any] = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
if __name__ == "__main__":
    main()
| 523 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
return bridges
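# Example (a sketch using the helpers above): graph 0 is two cycles joined by a
# path, so exactly the path edges are bridges:
#
#     compute_bridges(get_demo_graph(0))   # -> [(3, 4), (2, 3), (2, 5)]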
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 523 | 1 |
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
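# Smaller illustration (not in the original): sieve(30) should return
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]. The first loop collects base primes up
# to sqrt(n); the segmented loop then marks multiples window by window, keeping
# memory per segment at O(sqrt(n)).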
| 497 |
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    output_string_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(output_string_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 497 | 1 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone))
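# A few spot checks (illustrative; the regex accepts 0 / 94 / +94 / 0094
# prefixes, a 7x mobile code, an optional separator, then seven digits):
#
#     is_sri_lankan_phone_number('0094702343221')   # True
#     is_sri_lankan_phone_number('+94713283443')    # True
#     is_sri_lankan_phone_number('0912343221')      # False (9 is not a mobile code)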
| 94 |
def binary_recursive(decimal: int) -> str:
    """Take a positive integer value and return its binary equivalent."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input string, then delegate to binary_recursive() and
    re-attach the sign and the '0b' prefix."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'''{negative}0b{binary_recursive(int(number))}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
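# Example (a sketch using the renamed helpers): 21 decimal is 10101 binary and
# the sign is re-attached after the recursion:
#
#     main('21')    # -> '0b10101'
#     main('-21')   # -> '-0b10101'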
| 382 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(r'blocks\.(\d+)\.att', r'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(r'blocks\.(\d+)\.ffn', r'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 715 |
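To make the renaming rules in convert_state_dict above concrete, here is a sketch run on a toy state dict; the tensor shapes are arbitrary placeholders, not real RWKV shapes:

import torch

toy = {
    "emb.weight": torch.zeros(4, 2),
    "blocks.0.ln0.weight": torch.zeros(2),
    "blocks.1.att.time_mix_k": torch.zeros(2),
    "blocks.1.ffn.key.weight": torch.zeros(2, 2),
    "head.weight": torch.zeros(4, 2),
}
print(list(convert_state_dict(toy).keys()))
# ['rwkv.embeddings.weight', 'rwkv.blocks.0.pre_ln.weight',
#  'rwkv.blocks.1.attention.time_mix_key', 'rwkv.blocks.1.feed_forward.key.weight',
#  'head.weight']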
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes

def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'{solution() = }')
| 186 | 0 |
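The small cases quoted in the Project Euler 50 statement double as tests for solution above:

# 41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest consecutive-prime sum that is
# itself prime below one hundred; below one thousand it is 953 (21 terms).
assert solution(100) == 41
assert solution(1000) == 953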
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    '''simple docstring'''
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    '''simple docstring'''
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are almost colinear: plain linear interpolation is fine
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    '''simple docstring'''
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    '''simple docstring'''
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model,
            coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        '''simple docstring'''
        self.enable_attention_slicing(None)
    def freeze_vae(self):
        '''simple docstring'''
        set_requires_grad(self.vae, False)
    def unfreeze_vae(self):
        '''simple docstring'''
        set_requires_grad(self.vae, True)
    def freeze_unet(self):
        '''simple docstring'''
        set_requires_grad(self.unet, False)
    def unfreeze_unet(self):
        '''simple docstring'''
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        '''simple docstring'''
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def __lowercase ( self : str ,A : Optional[int] ,A : Dict ,A : int ,A : Optional[int] ,A : Optional[Any] ,A : int=None ):
'''simple docstring'''
if not isinstance(A ,torch.Tensor ):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(A )}" )
UpperCAmelCase__ : int = image.to(device=A ,dtype=A )
if isinstance(A ,A ):
UpperCAmelCase__ : List[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
UpperCAmelCase__ : Union[str, Any] = torch.cat(A ,dim=0 )
else:
UpperCAmelCase__ : List[Any] = self.vae.encode(A ).latent_dist.sample(A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
UpperCAmelCase__ : Any = 0.1_8_2_1_5 * init_latents
UpperCAmelCase__ : Tuple = init_latents.repeat_interleave(A ,dim=0 )
UpperCAmelCase__ : Any = randn_tensor(init_latents.shape ,generator=A ,device=A ,dtype=A )
# get latents
UpperCAmelCase__ : Optional[Any] = self.scheduler.add_noise(A ,A ,A )
UpperCAmelCase__ : Union[str, Any] = init_latents
return latents
def __lowercase ( self : List[Any] ,A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCAmelCase__ : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
UpperCAmelCase__ : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )
def __lowercase ( self : str ,A : List[str] ,A : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.feature_extractor.preprocess(A )
UpperCAmelCase__ : List[Any] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCAmelCase__ : Optional[Any] = self.clip_model.get_image_features(A )
UpperCAmelCase__ : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=A )
UpperCAmelCase__ : Tuple = image_embeddings_clip.repeat_interleave(A ,dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __lowercase ( self : Any ,A : List[Any] ,A : List[Any] ,A : int ,A : int ,A : int ,A : List[str] ,A : Optional[int] ,):
'''simple docstring'''
UpperCAmelCase__ : Tuple = latents.detach().requires_grad_()
UpperCAmelCase__ : Tuple = self.scheduler.scale_model_input(A ,A )
# predict the noise residual
UpperCAmelCase__ : List[Any] = self.unet(A ,A ,encoder_hidden_states=A ).sample
if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCAmelCase__ : str = self.scheduler.alphas_cumprod[timestep]
UpperCAmelCase__ : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase__ : Dict = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCAmelCase__ : int = torch.sqrt(A )
UpperCAmelCase__ : List[Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler ,A ):
UpperCAmelCase__ : List[Any] = self.scheduler.sigmas[index]
UpperCAmelCase__ : Any = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler )} not supported" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
UpperCAmelCase__ : List[Any] = 1 / 0.1_8_2_1_5 * sample
UpperCAmelCase__ : Union[str, Any] = self.vae.decode(A ).sample
UpperCAmelCase__ : Optional[int] = (image / 2 + 0.5).clamp(0 ,1 )
UpperCAmelCase__ : Tuple = transforms.Resize(self.feature_extractor_size )(A )
UpperCAmelCase__ : List[Any] = self.normalize(A ).to(latents.dtype )
UpperCAmelCase__ : Union[str, Any] = self.clip_model.get_image_features(A )
UpperCAmelCase__ : Optional[int] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=A )
UpperCAmelCase__ : Union[str, Any] = spherical_dist_loss(A ,A ).mean() * clip_guidance_scale
UpperCAmelCase__ : List[Any] = -torch.autograd.grad(A ,A )[0]
if isinstance(self.scheduler ,A ):
UpperCAmelCase__ : List[str] = latents.detach() + grads * (sigma**2)
UpperCAmelCase__ : Optional[Any] = noise_pred_original
else:
UpperCAmelCase__ : Tuple = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Dict ,A : Union[torch.FloatTensor, PIL.Image.Image] ,A : Union[torch.FloatTensor, PIL.Image.Image] ,A : Optional[str] = None ,A : Optional[str] = None ,A : Optional[int] = 512 ,A : Optional[int] = 512 ,A : float = 0.6 ,A : Optional[int] = 50 ,A : Optional[float] = 7.5 ,A : Optional[int] = 1 ,A : float = 0.0 ,A : Optional[float] = 100 ,A : Optional[torch.Generator] = None ,A : Optional[str] = "pil" ,A : bool = True ,A : float = 0.8 ,A : float = 0.1 ,A : float = 0.1 ,):
'''simple docstring'''
if isinstance(A ,A ) and len(A ) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(A )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(A ,torch.Generator ) and batch_size > 1:
UpperCAmelCase__ : int = [generator] + [None] * (batch_size - 1)
UpperCAmelCase__ : Union[str, Any] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
UpperCAmelCase__ : str = [x[0] for x in coca_is_none if x[1]]
UpperCAmelCase__ : Optional[Any] = """, """.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
UpperCAmelCase__ : Union[str, Any] = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
UpperCAmelCase__ : Optional[Any] = self.get_image_description(A )
# get prompt text embeddings for content and style
UpperCAmelCase__ : Any = self.tokenizer(
A ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=A ,return_tensors="""pt""" ,)
UpperCAmelCase__ : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase__ : List[str] = self.tokenizer(
A ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=A ,return_tensors="""pt""" ,)
UpperCAmelCase__ : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase__ : Tuple = slerp(A ,A ,A )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase__ : Any = text_embeddings.repeat_interleave(A ,dim=0 )
# set timesteps
UpperCAmelCase__ : List[Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCAmelCase__ : Any = {}
if accepts_offset:
UpperCAmelCase__ : List[Any] = 1
self.scheduler.set_timesteps(A ,**A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.get_timesteps(A ,A ,self.device )
UpperCAmelCase__ : List[str] = timesteps[:1].repeat(A )
# Preprocess image
UpperCAmelCase__ : Tuple = preprocess(A ,A ,A )
UpperCAmelCase__ : str = self.prepare_latents(
A ,A ,A ,text_embeddings.dtype ,self.device ,A )
UpperCAmelCase__ : Tuple = preprocess(A ,A ,A )
UpperCAmelCase__ : Dict = self.prepare_latents(
A ,A ,A ,text_embeddings.dtype ,self.device ,A )
UpperCAmelCase__ : int = slerp(A ,A ,A )
if clip_guidance_scale > 0:
UpperCAmelCase__ : List[Any] = self.get_clip_image_embeddings(A ,A )
UpperCAmelCase__ : Any = self.get_clip_image_embeddings(A ,A )
UpperCAmelCase__ : Optional[Any] = slerp(
A ,A ,A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase__ : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase__ : Dict = content_text_input.input_ids.shape[-1]
UpperCAmelCase__ : List[Any] = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=A ,return_tensors="""pt""" )
UpperCAmelCase__ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCAmelCase__ : Optional[int] = uncond_embeddings.repeat_interleave(A ,dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase__ : Dict = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase__ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCAmelCase__ : Union[str, Any] = torch.randn(A ,generator=A ,device="""cpu""" ,dtype=A ).to(
self.device )
else:
UpperCAmelCase__ : Optional[int] = torch.randn(A ,generator=A ,device=self.device ,dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCAmelCase__ : List[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase__ : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase__ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase__ : Optional[int] = {}
if accepts_eta:
UpperCAmelCase__ : Union[str, Any] = eta
# check if the scheduler accepts generator
UpperCAmelCase__ : str = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCAmelCase__ : Optional[int] = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ : int = self.scheduler.scale_model_input(A ,A )
# predict the noise residual
UpperCAmelCase__ : Tuple = self.unet(A ,A ,encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase__ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCAmelCase__ : Optional[int] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.cond_fn(
A ,A ,A ,A ,A ,A ,A ,)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ : List[Any] = self.scheduler.step(A ,A ,A ,**A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
UpperCAmelCase__ : Any = 1 / 0.1_8_2_1_5 * latents
UpperCAmelCase__ : int = self.vae.decode(A ).sample
UpperCAmelCase__ : Any = (image / 2 + 0.5).clamp(0 ,1 )
UpperCAmelCase__ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ : int = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A ,nsfw_content_detected=A )
| 65 |
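The slerp helper above does the geometric work of blending text and image embeddings. A minimal numeric check, assuming the NumPy branch (halfway between two orthogonal unit vectors lands at the 45-degree point of the arc):

import numpy as np

v0 = np.array([1.0, 0.0])
v1 = np.array([0.0, 1.0])
print(slerp(0.5, v0, v1))  # ~ [0.7071 0.7071]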
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    '''simple docstring'''
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(lista)

def check(binary: list[str]) -> list[str]:
    '''simple docstring'''
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the two terms merge: mark both as covered, keep the merge
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))

def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp

def is_for_table(string1: str, string2: str, count: int) -> bool:
    '''simple docstring'''
    lista = list(string1)
    listb = list(string2)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count

def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    '''simple docstring'''
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0

def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    '''simple docstring'''
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart

def main() -> None:
    '''simple docstring'''
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 344 | 0 |
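Small hand-checkable cases for the Quine-McCluskey helpers above:

# Terms that differ in exactly one position merge; the differing bit becomes '_'.
print(compare_string("101", "111"))   # '1_1'
print(compare_string("100", "111"))   # False (two positions differ)
# is_for_table asks whether implicant and minterm differ in exactly `count`
# positions, where count is the number of '_' wildcards in the implicant.
print(is_for_table("1_1", "111", 1))  # True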
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly))

def horner(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_lowercase = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowercase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 242 |
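Horner's rule evaluates c0 + c1*x + c2*x^2 + ... with one multiply-add per coefficient by nesting it as c0 + x*(c1 + x*(c2 + ...)). A tiny worked case:

# poly = (1, 2, 3), x = 2: ((0*2 + 3)*2 + 2)*2 + 1 = 17 = 1 + 2*2 + 3*4
assert horner((1.0, 2.0, 3.0), 2.0) == 17.0 == evaluate_poly((1.0, 2.0, 3.0), 2.0)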
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: Optional[int] = self.get_tokenizer()
lowerCamelCase__: List[Any] = self.get_rust_tokenizer()
lowerCamelCase__: Dict = self.get_image_processor()
lowerCamelCase__: int = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase__: Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
lowerCamelCase__: str = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase__: Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: str = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__: List[str] = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
lowerCamelCase__: str = self.get_image_processor(do_normalize=__a )
lowerCamelCase__: List[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=__a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: str = self.get_image_processor()
lowerCamelCase__: Union[str, Any] = self.get_tokenizer()
lowerCamelCase__: List[str] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: int = self.prepare_image_inputs()
lowerCamelCase__: List[str] = image_processor(__a , return_tensors="""np""" )
lowerCamelCase__: Optional[Any] = processor(images=__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: Tuple = self.get_image_processor()
lowerCamelCase__: Dict = self.get_tokenizer()
lowerCamelCase__: Optional[int] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: List[str] = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__: Optional[Any] = processor(text=__a )
lowerCamelCase__: Tuple = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: int = self.get_image_processor()
lowerCamelCase__: Any = self.get_tokenizer()
lowerCamelCase__: Tuple = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: Dict = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__: int = self.prepare_image_inputs()
lowerCamelCase__: Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = self.get_image_processor()
lowerCamelCase__: List[Any] = self.get_tokenizer()
lowerCamelCase__: Optional[int] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__: Union[str, Any] = processor.batch_decode(__a )
lowerCamelCase__: Union[str, Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: str = self.get_image_processor()
lowerCamelCase__: Tuple = self.get_tokenizer()
lowerCamelCase__: Union[str, Any] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
lowerCamelCase__: Optional[int] = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__: Tuple = self.prepare_image_inputs()
lowerCamelCase__: Optional[int] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 242 | 1 |
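Outside the test harness, the processor pattern exercised above looks roughly like this; the checkpoint id is an assumption, and any ChineseCLIP checkpoint with a saved processor should behave the same:

import numpy as np
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")  # assumed id
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']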
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3,
                 is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
                 num_labels=3):
        '''simple docstring'''
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        '''simple docstring'''
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : List[str] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : int = self._prepare_for_class(_A , _A )
UpperCAmelCase__ : Optional[int] = model_class(_A )
@jax.jit
def model_jitted(_A : Dict , **_A : Optional[Any] ):
return model(pixel_values=_A , **_A )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase__ : int = model_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase__ : Optional[Any] = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : Tuple = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
UpperCAmelCase__ : List[str] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_A )
def a__ ( ) -> Any:
UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Any ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
UpperCAmelCase__ : Any = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Optional[Any] = image_processor(images=_A , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
UpperCAmelCase__ : Optional[int] = np.ones((1, 196) , dtype=_A )
# forward pass
UpperCAmelCase__ : Any = model(pixel_values=_A , bool_masked_pos=_A )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[int] = (1, 196, 8_192)
self.assertEqual(logits.shape , _A )
UpperCAmelCase__ : Optional[int] = np.array(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _A , atol=1e-2 ) )
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
UpperCAmelCase__ : List[str] = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=_A , return_tensors='''np''' )
# forward pass
UpperCAmelCase__ : Optional[int] = model(**_A )
UpperCAmelCase__ : int = outputs.logits
# verify the logits
UpperCAmelCase__ : List[str] = (1, 1_000)
self.assertEqual(logits.shape , _A )
UpperCAmelCase__ : List[str] = np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] )
self.assertTrue(np.allclose(logits[0, :3] , _A , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 281
self.assertEqual(logits.argmax(-1 ).item() , _A )
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
UpperCAmelCase__ : Any = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=_A , return_tensors='''np''' )
# forward pass
UpperCAmelCase__ : Union[str, Any] = model(**_A )
UpperCAmelCase__ : Union[str, Any] = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = (1, 21_841)
self.assertEqual(logits.shape , _A )
UpperCAmelCase__ : str = np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] )
self.assertTrue(np.allclose(logits[0, :3] , _A , atol=1e-4 ) )
UpperCAmelCase__ : Dict = 2_396
self.assertEqual(logits.argmax(-1 ).item() , _A )
| 75 |
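The JIT test above reduces to one invariant: a Flax module must return identically shaped outputs with and without jax.jit. The same check in miniature:

import jax
import jax.numpy as jnp

def f(x):
    return x * 2, x.sum()

x = jnp.ones((1, 3))
for jitted_out, eager_out in zip(jax.jit(f)(x), f(x)):
    assert jitted_out.shape == eager_out.shape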
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ['torch', 'torchsde']
def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''torchsde'''] )
| 75 | 1 |
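The stub above lets the package import succeed when torchsde is missing while failing loudly on first use. A hypothetical minimal version of the same mechanism (all names here are made up for illustration):

class _DummyBackend(type):
    # any attribute access on the class raises instead of returning garbage
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires: torch, torchsde")

class FakeSdeScheduler(metaclass=_DummyBackend):
    def __init__(self, *args, **kwargs):
        raise ImportError("FakeSdeScheduler requires: torch, torchsde")

# Both FakeSdeScheduler() and FakeSdeScheduler.from_config(...) now raise a
# clear ImportError rather than an AttributeError deep inside user code.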
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCAmelCase : Tuple = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class A ( unittest.TestCase ):
a_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def snake_case__ ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
__UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
__UpperCAmelCase = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}] )
__UpperCAmelCase = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(__a ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] , )
__UpperCAmelCase = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
# Legacy behavior
__UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=__a )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
__UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}]] )
__UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] , )
__UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__a )
self.assertEqual(
nested_simplify(__a ) , [
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
] , )
@require_torch
def snake_case__ ( self : Optional[Any] ) -> Tuple:
import torch
__UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
__UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@require_tf
def snake_case__ ( self : int ) -> Tuple:
__UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
__UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@slow
@require_torch
def snake_case__ ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase = pipeline('''text-classification''' )
__UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
__UpperCAmelCase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
__UpperCAmelCase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
@slow
@require_tf
def snake_case__ ( self : List[Any] ) -> Dict:
__UpperCAmelCase = pipeline('''text-classification''' , framework='''tf''' )
__UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
__UpperCAmelCase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
__UpperCAmelCase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(__a ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
def snake_case__ ( self : List[Any] , __a : List[Any] , __a : Optional[int] , __a : Any ) -> List[Any]:
__UpperCAmelCase = TextClassificationPipeline(model=__a , tokenizer=__a )
return text_classifier, ["HuggingFace is in", "This is another test"]
def snake_case__ ( self : int , __a : List[str] , __a : str ) -> Dict:
__UpperCAmelCase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__UpperCAmelCase = '''HuggingFace is in'''
__UpperCAmelCase = text_classifier(__a )
self.assertEqual(nested_simplify(__a ) , [{'''label''': ANY(__a ), '''score''': ANY(__a )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
__UpperCAmelCase = ['''HuggingFace is in ''', '''Paris is in France''']
__UpperCAmelCase = text_classifier(__a )
self.assertEqual(
nested_simplify(__a ) , [{'''label''': ANY(__a ), '''score''': ANY(__a )}, {'''label''': ANY(__a ), '''score''': ANY(__a )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__UpperCAmelCase = text_classifier(__a , top_k=__a )
__UpperCAmelCase = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__a ) , [[{'''label''': ANY(__a ), '''score''': ANY(__a )}] * N, [{'''label''': ANY(__a ), '''score''': ANY(__a )}] * N] , )
__UpperCAmelCase = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
__UpperCAmelCase = text_classifier(__a )
self.assertEqual(
nested_simplify(__a ) , {'''label''': ANY(__a ), '''score''': ANY(__a )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__UpperCAmelCase = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(__a ):
text_classifier(__a )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__UpperCAmelCase = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(__a ) , [{'''label''': ANY(__a ), '''score''': ANY(__a )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
| 654 | '''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''')
    return velocity / c

def gamma(velocity: float) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity) ** 2)

def transformation_matrix(velocity: float) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )

def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """simple docstring"""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 1 |
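Plugging numbers into the helpers above: at half light speed, beta is 0.5 and gamma is 1/sqrt(1 - 0.25):

v = 0.5 * c
print(beta(v))   # 0.5
print(gamma(v))  # ~ 1.1547005
print(transform(v, np.array([1.0, 1.0, 1.0, 1.0])))  # numeric boost of a unit event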
def solution(limit: int = 50000000) -> int:
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f"{solution() = }")
| 488 |
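The opening example from the Project Euler 87 statement doubles as a test for solution above:

# 28 = 2**2 + 2**3 + 2**4 is the smallest prime power triple, and exactly four
# such numbers (28, 33, 47, 49) exist below fifty.
assert solution(50) == 4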
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , __magic_name__ : List[Any] , __magic_name__ : Tuple=13 , __magic_name__ : int=7 , __magic_name__ : List[str]=True , __magic_name__ : List[Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Dict=True , __magic_name__ : List[Any]=99 , __magic_name__ : Tuple=32 , __magic_name__ : Optional[int]=5 , __magic_name__ : Dict=4 , __magic_name__ : List[Any]=37 , __magic_name__ : List[Any]="gelu" , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : int=512 , __magic_name__ : Any=16 , __magic_name__ : Optional[int]=2 , __magic_name__ : Dict=0.02 , __magic_name__ : List[str]=4 , ) -> str:
lowerCamelCase_ : Tuple = parent
lowerCamelCase_ : List[str] = batch_size
lowerCamelCase_ : str = seq_length
lowerCamelCase_ : Optional[int] = is_training
lowerCamelCase_ : Optional[int] = use_attention_mask
lowerCamelCase_ : Optional[int] = use_token_type_ids
lowerCamelCase_ : int = use_labels
lowerCamelCase_ : Dict = vocab_size
lowerCamelCase_ : List[str] = hidden_size
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : Any = num_attention_heads
lowerCamelCase_ : Optional[Any] = intermediate_size
lowerCamelCase_ : Dict = hidden_act
lowerCamelCase_ : Optional[int] = hidden_dropout_prob
lowerCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase_ : Optional[int] = max_position_embeddings
lowerCamelCase_ : Optional[int] = type_vocab_size
lowerCamelCase_ : int = type_sequence_label_size
lowerCamelCase_ : int = initializer_range
lowerCamelCase_ : Tuple = num_choices
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
lowerCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Optional[int] = None
if self.use_attention_mask:
lowerCamelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : List[str] = None
if self.use_token_type_ids:
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ : List[Any] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
lowerCamelCase_ : str = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[str] = config_and_inputs
lowerCamelCase_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
lowerCamelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple = config_and_inputs
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = True
lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
lowerCamelCase_ : Dict = FlaxRobertaPreLayerNormModelTester(self )
@slow
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__magic_name__ )
lowerCamelCase_ : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
@require_flax
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
lowerCamelCase_ : List[Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__magic_name__ )
lowerCamelCase_ : List[Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
lowerCamelCase_ : Optional[int] = model(__magic_name__ )[0]
lowerCamelCase_ : Tuple = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , __magic_name__ )
# compare the actual values for a slice.
lowerCamelCase_ : List[str] = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
lowerCamelCase_ : Optional[Any] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=__magic_name__ )
lowerCamelCase_ : List[Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
lowerCamelCase_ : Dict = model(__magic_name__ )[0]
# compare the actual values for a slice.
lowerCamelCase_ : List[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __magic_name__ , atol=1e-4 ) )
| 488 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = DiTPipeline
UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase = False
def a__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCamelCase_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=UpperCamelCase_ , )
lowerCamelCase_ = AutoencoderKL()
lowerCamelCase_ = DDIMScheduler()
lowerCamelCase_ = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def a__ ( self : Dict , A_ : Optional[Any] , A_ : List[str]=0 ) -> List[str]:
"""simple docstring"""
if str(UpperCamelCase_ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(UpperCamelCase_ )
else:
lowerCamelCase_ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowerCamelCase_ = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = 'cpu'
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ = self.get_dummy_inputs(UpperCamelCase_ )
lowerCamelCase_ = pipe(**UpperCamelCase_ ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowerCamelCase_ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
lowerCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=UpperCamelCase_ , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
lowerCamelCase_ = ['vase', 'umbrella', 'white shark', 'white wolf']
lowerCamelCase_ = pipe.get_label_ids(UpperCamelCase_ )
lowerCamelCase_ = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
lowerCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
lowerCamelCase_ = ['vase', 'umbrella']
lowerCamelCase_ = pipe.get_label_ids(UpperCamelCase_ )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 704 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: print the indices of a maximum-size set of
    non-overlapping activities, assuming `finish` is sorted in ascending order.
    """
    n = len(finish)
    print('The following activities are selected:')

    # The first activity is always selected
    i = 0
    print(i, end=',')

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
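    # For these inputs the greedy choice selects activities 0, 1, 3 and 4,
    # so the line printed above reads: 0,1,3,4,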
| 651 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
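
# Note: `_LazyModule` defers the submodule imports declared above until the
# corresponding attribute is first accessed, so importing this package does
# not pull in torch-dependent modules up front.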
| 158 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards in base 10."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
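
    # Quick sanity checks:
    assert is_palindrome(12321) is True
    assert is_palindrome(12345) is False
    assert is_palindrome(-121) is False  # negatives never count as palindromes here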
| 158 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = (DEISMultistepScheduler,)
_SCREAMING_SNAKE_CASE :List[Any] = (("""num_inference_steps""", 25),)
def _a ( self , **_a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**_a )
return config
def _a ( self , _a=0 , **_a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("""num_inference_steps""" , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE__ : str = 0.1 * sample
SCREAMING_SNAKE_CASE__ : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config(**_a )
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__ : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
SCREAMING_SNAKE_CASE__ : Tuple = scheduler_class.from_pretrained(_a )
new_scheduler.set_timesteps(_a )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE__ : str = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = sample, sample
for t in range(_a , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE__ : str = scheduler.step(_a , _a , _a , **_a ).prev_sample
SCREAMING_SNAKE_CASE__ : Any = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self ) -> Tuple:
"""simple docstring"""
pass
def _a ( self , _a=0 , **_a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__ : int = kwargs.pop("""num_inference_steps""" , _a )
SCREAMING_SNAKE_CASE__ : Any = self.dummy_sample
SCREAMING_SNAKE_CASE__ : Any = 0.1 * sample
SCREAMING_SNAKE_CASE__ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_a )
scheduler.set_timesteps(_a )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE__ : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_a )
SCREAMING_SNAKE_CASE__ : Tuple = scheduler_class.from_pretrained(_a )
# copy over dummy past residuals
new_scheduler.set_timesteps(_a )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE__ : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__ : List[Any] = scheduler.step(_a , _a , _a , **_a ).prev_sample
SCREAMING_SNAKE_CASE__ : str = new_scheduler.step(_a , _a , _a , **_a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self , _a=None , **_a ) -> Union[str, Any]:
"""simple docstring"""
if scheduler is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config(**_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config(**_a )
SCREAMING_SNAKE_CASE__ : str = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : str = 10
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : str = model(_a , _a )
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.step(_a , _a , _a ).prev_sample
return sample
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop("""num_inference_steps""" , _a )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : str = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE__ : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(_a , """set_timesteps""" ):
scheduler.set_timesteps(_a )
elif num_inference_steps is not None and not hasattr(_a , """set_timesteps""" ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE__ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
SCREAMING_SNAKE_CASE__ : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE__ : Any = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_a , _a , _a , **_a ).prev_sample
SCREAMING_SNAKE_CASE__ : str = scheduler.step(_a , _a , _a , **_a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DEISMultistepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE__ : Tuple = self.full_loop(scheduler=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
SCREAMING_SNAKE_CASE__ : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__ : Tuple = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__ : str = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE__ : str = self.full_loop(scheduler=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def _a ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def _a ( self ) -> Dict:
"""simple docstring"""
self.check_over_configs(thresholding=_a )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , algorithm_type="""deis""" , solver_order=_a , solver_type=_a , )
def _a ( self ) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.full_loop(
solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , )
assert not torch.isnan(_a ).any(), "Samples have nan numbers"
def _a ( self ) -> int:
"""simple docstring"""
self.check_over_configs(lower_order_final=_a )
self.check_over_configs(lower_order_final=_a )
def _a ( self ) -> str:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=_a , time_step=0 )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.full_loop()
SCREAMING_SNAKE_CASE__ : Dict = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.23_916 ) < 1E-3
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.full_loop(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ : List[str] = torch.mean(torch.abs(_a ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config(thresholding=_a , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 10
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(_a )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : List[Any] = model(_a , _a )
SCREAMING_SNAKE_CASE__ : int = scheduler.step(_a , _a , _a ).prev_sample
assert sample.dtype == torch.floataa
| 12 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find an `open(...)` call in the file that does not pass an encoding (and is not binary-mode)."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, file_path: str):
        r"""Find a `print(...)` call that is not commented out or inside a docstring."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 12 | 1 |
from math import sqrt
def is_prime(number):
    """Return True if 'number' is a prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n):
    """Sieve of Eratosthenes: return the list of all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n):
    """Return the list of all primes between 2 and n, tested with is_prime()."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number):
    """Return the prime factorization of 'number' as a list of its prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned of the function.

    # potential prime number factors.
    factor = 2

    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def greatest_prime_factor(number):
    """Return the greatest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number):
    """Return True if 'number' is even."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number):
    """Return True if 'number' is odd."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number):
    """Goldbach's conjecture: return two primes whose sum equals the even 'number' > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans


def gcd(number1, number2):
    """Euclidean algorithm: return the greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1


def kg_v(number1, number2):
    """Return the least common multiple (German: kgV) of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans


def get_prime(n):
    """Return the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    """Return all primes strictly between the primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
    return ans


def get_divisors(n):
    """Return all divisors of n (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    """Return True if 'number' equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Return the fraction reduced by its gcd as a (numerator, denominator) tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    """Return the n-th Fibonacci number, computed iteratively."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
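

if __name__ == "__main__":
    # Illustrative checks (a minimal sketch; the expected values are standard
    # number-theory facts, not part of the original module):
    assert is_prime(97) and not is_prime(1)
    assert sieve_er(20) == get_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert gcd(24, 36) == 12 and kg_v(24, 36) == 72
    assert goldbach(28) == [5, 23]
    assert is_perfect_number(28)
    assert fib(10) == 89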
| 62 |
def factorial(num: int) -> int:
    """Return num!."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split 'number' into its decimal digits and return their sum."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler 20 for num=100)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
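    # Sanity check: the digits of 10! = 3628800 sum to 27.
    assert solution(10) == 27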
| 416 | 0 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Hyperbolic tangent activation: (2 / (1 + e^(-2x))) - 1, which equals tanh(x)."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
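
    # Sanity check: the closed form above matches numpy's built-in tanh.
    vec = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(tangent_hyperbolic(vec), np.tanh(vec))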
| 242 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
_lowercase = 'bert-base-cased'
_lowercase = 'fp16'
_lowercase = 'bf16'
_lowercase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCamelCase__ ( A__ ):
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
lowerCamelCase__: List[str] = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(__a ):
lowerCamelCase__: Union[str, Any] = self.dist_env.copy()
lowerCamelCase__: int = f"""{i + 1}"""
lowerCamelCase__: Any = strategy
with mockenv_context(**__a ):
lowerCamelCase__: Optional[int] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(__a ):
lowerCamelCase__: List[str] = self.dist_env.copy()
lowerCamelCase__: List[str] = prefetch_policy
with mockenv_context(**__a ):
lowerCamelCase__: Dict = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(__a ):
lowerCamelCase__: str = self.dist_env.copy()
lowerCamelCase__: Tuple = state_dict_type
with mockenv_context(**__a ):
lowerCamelCase__: int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__: str = AutoModel.from_pretrained(__a )
for policy in FSDP_AUTO_WRAP_POLICY:
lowerCamelCase__: str = self.dist_env.copy()
lowerCamelCase__: Union[str, Any] = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowerCamelCase__: Union[str, Any] = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
lowerCamelCase__: Optional[Any] = """2000"""
with mockenv_context(**__a ):
lowerCamelCase__: Union[str, Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__a )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowerCamelCase__: Optional[Any] = self.dist_env.copy()
lowerCamelCase__: str = """TRANSFORMER_BASED_WRAP"""
lowerCamelCase__: str = """T5Layer"""
with mockenv_context(**__a ):
lowerCamelCase__: Dict = FullyShardedDataParallelPlugin()
with self.assertRaises(__a ) as cm:
fsdp_plugin.set_auto_wrap_policy(__a )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
lowerCamelCase__: Union[str, Any] = self.dist_env.copy()
lowerCamelCase__: int = """SIZE_BASED_WRAP"""
lowerCamelCase__: str = """0"""
with mockenv_context(**__a ):
lowerCamelCase__: Any = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(__a )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowerCamelCase__: Dict = self.dist_env.copy()
lowerCamelCase__: Union[str, Any] = mp_dtype
with mockenv_context(**__a ):
lowerCamelCase__: List[str] = Accelerator()
if mp_dtype == "fp16":
lowerCamelCase__: Any = torch.floataa
elif mp_dtype == "bf16":
lowerCamelCase__: Dict = torch.bfloataa
lowerCamelCase__: str = MixedPrecision(param_dtype=__a , reduce_dtype=__a , buffer_dtype=__a )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __a )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __a ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__a )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowerCamelCase__: List[Any] = self.dist_env.copy()
lowerCamelCase__: int = str(__a ).lower()
with mockenv_context(**__a ):
lowerCamelCase__: int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__a ) )
@require_fsdp
@require_multi_gpu
@slow
class lowerCamelCase__ ( A__ ):
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
lowerCamelCase__: List[Any] = 0.82
lowerCamelCase__: List[str] = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
lowerCamelCase__: List[str] = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowerCamelCase__: List[str] = 160
lowerCamelCase__: Optional[int] = 160
lowerCamelCase__: Optional[int] = inspect.getfile(accelerate.test_utils )
lowerCamelCase__: Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: List[str] = os.path.join(self.test_scripts_folder , """test_performance.py""" )
lowerCamelCase__: Dict = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
lowerCamelCase__: Dict = cmd.copy()
for i, strategy in enumerate(__a ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: str = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
lowerCamelCase__: Optional[Any] = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(__a ):
lowerCamelCase__: Optional[Any] = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
lowerCamelCase__: Union[str, Any] = len(__a )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowerCamelCase__: Dict = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
lowerCamelCase__: Union[str, Any] = cmd_config[:-1]
lowerCamelCase__: List[Any] = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: str = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
lowerCamelCase__: int = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowerCamelCase__: str = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(__a ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
| 242 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=2 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_6 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=6 , _lowerCAmelCase=6 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : Optional[Any] = parent
_lowercase : Optional[int] = batch_size
_lowercase : List[str] = num_channels
_lowercase : Tuple = image_size
_lowercase : str = patch_size
_lowercase : Union[str, Any] = text_seq_length
_lowercase : List[str] = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : Dict = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : Optional[int] = vocab_size
_lowercase : int = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : List[Any] = intermediate_size
_lowercase : Optional[int] = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : List[str] = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Optional[Any] = type_vocab_size
_lowercase : List[str] = type_sequence_label_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = coordinate_size
_lowercase : Tuple = shape_size
_lowercase : Any = num_labels
_lowercase : List[str] = num_choices
_lowercase : Optional[Any] = scope
_lowercase : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowercase : Optional[Any] = text_seq_length
_lowercase : Dict = (image_size // patch_size) ** 2 + 1
_lowercase : Union[str, Any] = self.text_seq_length + self.image_seq_length
def __a ( self ):
_lowercase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowercase : Dict = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : Optional[int] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Any = bbox[i, j, 0]
_lowercase : int = t
_lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : int = None
if self.use_input_mask:
_lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowercase : List[Any] = None
_lowercase : Union[str, Any] = None
if self.use_labels:
_lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowercase : List[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = LayoutLMvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# text + image
_lowercase : str = model(_lowerCAmelCase , pixel_values=_lowerCAmelCase )
_lowercase : Optional[Any] = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowercase : str = model(pixel_values=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : str = LayoutLMvaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Any = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[Any] = self.num_labels
_lowercase : Optional[Any] = LayoutLMvaForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Tuple = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = LayoutLMvaForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowercase : Union[str, Any] = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Union[str, Any] = config_and_inputs
_lowercase : Dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : List[Any] = False
_UpperCamelCase : str = False
_UpperCamelCase : Any = False
_UpperCamelCase : Dict = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Any = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __a ( self ):
_lowercase : List[Any] = LayoutLMvaModelTester(self )
_lowercase : Optional[int] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4))
| 66 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        """simple docstring"""
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')

    @cached_property
    def default_tokenizer_fast(self):
        """simple docstring"""
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
    @require_torch
    def test_prepare_batch(self):
        """simple docstring"""
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='''pt''')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        """simple docstring"""
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='''pt''')
            self.assertIn('''input_ids''', batch)
            self.assertIn('''attention_mask''', batch)
            self.assertNotIn('''labels''', batch)
            self.assertNotIn('''decoder_attention_mask''', batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        """simple docstring"""
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='''max_length''', return_tensors='''pt''')
            self.assertEqual(32, targets['''input_ids'''].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['''I am a small frog''' * 10_24, '''I am a small frog'''], padding=True, truncation=True, return_tensors='''pt''')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 51_22))
    @require_torch
    def test_special_tokens(self):
        """simple docstring"""
        src_text = ['''A long paragraph for summarization.''']
        tgt_text = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors='''pt''')
            targets = tokenizer(text_target=tgt_text, return_tensors='''pt''')
            input_ids = inputs['''input_ids''']
            labels = targets['''input_ids''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['''Summary of the text.''', '''Another summary.''']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output['''global_attention_mask'''] = [[0] * len(x) for x in encoded_output['''input_ids''']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['''global_attention_mask'''], expected_global_attention_mask)
    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass
    def test_embeded_special_tokens(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r['''token_type_ids''']), sum(tokens_p['''token_type_ids''']))
                self.assertEqual(
                    sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']), sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                self.assertSequenceEqual(tokens_p['''input_ids'''], [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
                self.assertSequenceEqual(tokens_r['''input_ids'''], [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
                self.assertSequenceEqual(
                    tokens_r_str, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
| 22 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="""audio-classification""", metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()})
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel})
    audio_column: str = """audio"""
    label_column: str = """labels"""
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 705 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
UpperCAmelCase__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
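# Added check (an illustrative example, not in the original file): a triangle
# contains an odd cycle, so it cannot be two-colored and the function returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # expected: False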
| 430 | 0 |
def move_tower(height, from_pole, to_pole, with_pole):
    """simple docstring"""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(from_pole, to_pole):
    """simple docstring"""
    print("""moving disk from""", from_pole, """to""", to_pole)
def main():
    """simple docstring"""
    height = int(input("""Height of hanoi: """).strip())
    move_tower(height, """A""", """B""", """C""")
if __name__ == "__main__":
main()
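# Added note (illustrative, not in the original file): moving a tower of height n
# takes 2**n - 1 disk moves; e.g. move_tower(2, "A", "B", "C") prints:
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B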
| 225 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase : int = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""", FutureWarning, )
        super().__init__(*args, **kwargs)
| 214 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """simple docstring"""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("""The nodes number should be same as the number of coins""")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
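    # Added usage sketch (the tree shape is an illustrative assumption, not from
    # the original file): the root holds all three coins, so one coin must move
    # to each empty child, giving two moves in total.
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2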
| 700 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="""text-generation""", model="""sshleifer/tiny-ctrl""", framework="""pt""")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("""This is a test""", do_sample=False)
        self.assertEqual(
            outputs, [
                {
                    """generated_text""": (
                        """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
                        """ oscope. FiliFili@@"""
                    )
                }
            ], )
        outputs = text_generator(["""This is a test""", """This is a second test"""])
        self.assertEqual(
            outputs, [
                [
                    {
                        """generated_text""": (
                            """This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
                            """ oscope. FiliFili@@"""
                        )
                    }
                ],
                [
                    {
                        """generated_text""": (
                            """This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
                            """ oscope. oscope. FiliFili@@"""
                        )
                    }
                ],
            ], )
        outputs = text_generator("""This is a test""", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs, [
                {"""generated_token_ids""": ANY(list)},
                {"""generated_token_ids""": ANY(list)},
            ], )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = """<pad>"""
        outputs = text_generator(
            ["""This is a test""", """This is a second test"""], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
        self.assertEqual(
            outputs, [
                [
                    {"""generated_token_ids""": ANY(list)},
                    {"""generated_token_ids""": ANY(list)},
                ],
                [
                    {"""generated_token_ids""": ANY(list)},
                    {"""generated_token_ids""": ANY(list)},
                ],
            ], )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="""text-generation""", model="""sshleifer/tiny-ctrl""", framework="""tf""")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("""This is a test""", do_sample=False)
        self.assertEqual(
            outputs, [
                {
                    """generated_text""": (
                        """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
                        """ please,"""
                    )
                }
            ], )
        outputs = text_generator(["""This is a test""", """This is a second test"""], do_sample=False)
        self.assertEqual(
            outputs, [
                [
                    {
                        """generated_text""": (
                            """This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
                            """ please,"""
                        )
                    }
                ],
                [
                    {
                        """generated_text""": (
                            """This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
                            """ Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
                        )
                    }
                ],
            ], )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("""text-generation""", model="""hf-internal-testing/tiny-random-gpt2""")
        output = text_generator(prompt)
        self.assertEqual(
            output, [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}], )
        output = text_generator(prompt, stop_sequence=""" fe""")
        self.assertEqual(output, [{"""generated_text""": """Hello I believe in fe"""}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("""This is a test""")
        self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test"""))
        outputs = text_generator("""This is a test""", return_full_text=False)
        self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        self.assertNotIn("""This is a test""", outputs[0]["""generated_text"""])
        text_generator = pipeline(task="""text-generation""", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("""This is a test""")
        self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        self.assertNotIn("""This is a test""", outputs[0]["""generated_text"""])
        outputs = text_generator("""This is a test""", return_full_text=True)
        self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test"""))
        outputs = text_generator(["""This is great !""", """Something else"""], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs, [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ], )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["""This is great !""", """Something else"""], num_return_sequences=2, batch_size=2, do_sample=True)
            self.assertEqual(
                outputs, [
                    [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                    [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                ], )
        with self.assertRaises(ValueError):
            outputs = text_generator("""test""", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("""test""", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("""test""", return_text=True, return_tensors=True)
        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("""""")
            self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("""""")
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("""This is a test""" * 500, max_new_tokens=20)
            outputs = text_generator("""This is a test""" * 500, handle_long_generation="""hole""", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    """This is a test""" * 500, handle_long_generation="""hole""", max_new_tokens=tokenizer.model_max_length + 10, )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="""hf-internal-testing/tiny-random-bloom""", model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloat16}, )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("""This is a test""")
        self.assertEqual(
            out, [
                {
                    """generated_text""": (
                        """This is a test test test test test test test test test test test test test test test test"""
                        """ test"""
                    )
                }
            ], )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""", device_map="""auto""", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("""This is a test""")
        self.assertEqual(
            out, [
                {
                    """generated_text""": (
                        """This is a test test test test test test test test test test test test test test test test"""
                        """ test"""
                    )
                }
            ], )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""", device_map="""auto""")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("""This is a test""")
        self.assertEqual(
            out, [
                {
                    """generated_text""": (
                        """This is a test test test test test test test test test test test test test test test test"""
                        """ test"""
                    )
                }
            ], )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""", device=0, torch_dtype=torch.float16)
        pipe("""This is a test""")
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""", device_map="""auto""", torch_dtype=torch.float16)
        pipe("""This is a test""", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("""text-generation""", model="""hf-internal-testing/tiny-random-gpt2""")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("""transformers.generation.tf_utils""")
        else:
            logger = logging.get_logger("""transformers.generation.utils""")
        logger_msg = """Both `max_new_tokens`"""  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 548 | 0 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['''padding'''] = padding
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'''image''': image, '''question''': question}
        else:
            # Assume `image` already carries `{"image": ..., "question": ...}` dict(s)
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs['''image'''])
        model_inputs = self.tokenizer(
            inputs['''question'''], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 342 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = 'convbert'

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
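# Hypothetical usage sketch (the sizes are illustrative assumptions):
#   config = ConvBertConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2)
#   assert config.model_type == "convbert"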
| 242 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler: bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor: bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout: Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler: Optional[str] = field(
        default="linear" , metadata={"help": f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 593 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
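    # Added note (illustrative, not part of the original file): once torch is
    # available, attributes resolve lazily on first access, e.g.
    #   from transformers.models.autoformer import AutoformerModel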
| 593 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 504 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """simple docstring"""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), F"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
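    # Added sanity check (illustrative, not in the original file): there are three
    # ways to climb three steps -- 1+1+1, 1+2 and 2+1.
    assert climb_stairs(3) == 3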
| 104 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 719 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...         \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...         \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...          \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...          \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...          \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...          \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...          \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...          \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...         \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...          \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 524 | 0 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    '''simple docstring'''
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
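# Added sketch (illustrative, not part of the original script): cropping a random
# 2-second window out of a 5-second waveform sampled at 16 kHz would look like:
#   wav = np.zeros(5 * 16_000)
#   assert len(random_subsample(wav, max_length=2, sample_rate=16_000)) == 2 * 16_000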
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the training audio paths and labels.'})
    eval_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the validation audio paths and labels.'})
    train_split_name: str = field(
        default='train', metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        }, )
    eval_split_name: str = field(
        default='validation', metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        }, )
    audio_column_name: str = field(
        default='audio', metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''}, )
    label_column_name: str = field(
        default='label', metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    max_length_seconds: float = field(
        default=20, metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'}, )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='facebook/wav2vec2-base', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={'help': 'Name or path of preprocessor config.'})
    freeze_feature_encoder: bool = field(
        default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
    attention_mask: bool = field(
        default=True, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'}, )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder`"""
                """instead. Setting `freeze_feature_encoder==True`.""", FutureWarning, )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`."""
                """Only make use of `--freeze_feature_encoder`.""")
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_audio_classification""", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["""train"""] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["""eval"""] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--label_column_name` to the correct text column - one of """
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["""array"""], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate)
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["""labels"""] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        wavs = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["""labels"""] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets['''train'''].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="""audio-classification""", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
        raw_datasets["train"] = (
            raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        )
    # Set the training transforms
    raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
if data_args.max_eval_samples is not None:
        raw_datasets["eval"] = (
            raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
        )
    # Set the validation transforms
    raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
trainer = Trainer(
    model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor,
)
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
    metrics = trainer.evaluate()
    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
    "finetuned_from": model_args.model_name_or_path,
    "tasks": "audio-classification",
    "dataset": data_args.dataset_name,
    "tags": ["audio-classification"],
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 169 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch BertForPreTraining checkpoint."""
    # Initialise PyTorch model from the JSON config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
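    # Example invocation (a sketch; the checkpoint and config paths are hypothetical):
    #   python convert_bert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./bert_model.ckpt \
    #       --bert_config_file ./bert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin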
| 319 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__magic_name__ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Arguments extending `TrainingArguments` with sequence-to-sequence specific options."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}
    )
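# Minimal usage sketch (field values are illustrative):
#   training_args = Seq2SeqTrainingArguments(output_dir="out", sortish_sampler=True, predict_with_generate=True)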
| 314 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
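# Usage sketch: attribute access resolves lazily, so the torch-backed classes above are
# only imported on first use (assumes torch is installed):
#   from transformers.models.time_series_transformer import TimeSeriesTransformerConfig
#   config = TimeSeriesTransformerConfig(prediction_length=24)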
| 314 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 83 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Implements the SHA-256 hash algorithm (FIPS 180-4) from scratch."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
        # Initialize round constants
        self.round_constants = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message to a multiple of 64 bytes with its bit-length appended."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        """Run the SHA-256 compression function over every 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of rotations."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
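# Quick sanity check: SHA256(b"abc").hash should equal the FIPS 180-4 test vector
#   "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"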
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        """Our digest must match hashlib's reference implementation."""
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Hash either a CLI-provided string or the contents of a file."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 409 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
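# Minimal sketch of the composed config (all sub-configs default-initialized):
#   config = Blip2Config()
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size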
| 708 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding one prompt per task and copy."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generated functions are complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last, usually incomplete, block of generated code."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
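# Behavior sketch: for "<body>\nclass Tail" the split yields ["<body>", "\nclass", " Tail"],
# so dropping the last two pieces keeps only the completed code before the EOF marker.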
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple code completions for each task in the dataset."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def snake_case ( )-> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
def lowerCAmelCase_ ( self : Any ):
a__ = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
a__ = en_de_tokenizer(["I am a small frog"] ,return_tensors=a__ )
self.assertIsInstance(a__ ,a__ )
a__ = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(a__ ,batch.input_ids[0] )
a__ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(a__ )
a__ = [x.name for x in Path(a__ ).glob("*" )]
self.assertIn("source.spm" ,a__ )
MarianTokenizer.from_pretrained(a__ )
def lowerCAmelCase_ ( self : str ):
a__ = self.get_tokenizer()
a__ = tok(
["I am a small frog" * 10_00, "I am a small frog"] ,padding=a__ ,truncation=a__ ,return_tensors=a__ )
self.assertIsInstance(a__ ,a__ )
self.assertEqual(batch.input_ids.shape ,(2, 5_12) )
def lowerCAmelCase_ ( self : List[str] ):
a__ = self.get_tokenizer()
a__ = tok(["I am a tiny frog", "I am a small frog"] ,padding=a__ ,return_tensors=a__ )
self.assertIsInstance(a__ ,a__ )
self.assertEqual(batch_smaller.input_ids.shape ,(2, 10) )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# fmt: off
a__ = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ ,model_name="Helsinki-NLP/opus-mt-en-de" ,revision="1a8c2263da11e68e50938f97e10cd57820bd504c" ,decode_kwargs={"use_source_tokenizer": True} ,)
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
a__ = "Tämä on testi"
a__ = "This is a test"
a__ = [76, 7, 20_47, 2]
a__ = [69, 12, 11, 9_40, 2]
a__ = tokenizer(a__ ).input_ids
self.assertListEqual(a__ ,a__ )
a__ = tokenizer(text_target=a__ ).input_ids
self.assertListEqual(a__ ,a__ )
a__ = tokenizer.decode(a__ ,skip_special_tokens=a__ )
self.assertEqual(a__ ,a__ )
| 331 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder stack used to embed note tokens."""

    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert and broadcast the attention mask for the encoder blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
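# Shape sketch: for encoder_input_tokens of shape (batch, seq_len), the forward pass
# returns activations of shape (batch, seq_len, d_model) together with the input mask.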
| 331 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image,
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        """Test that the img2img pipeline works with fp16 weights."""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
_UpperCAmelCase : Optional[Any] = init_image.resize((768, 512))
_UpperCAmelCase : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''')
_UpperCAmelCase : Dict = '''BAAI/AltDiffusion'''
_UpperCAmelCase : Any = AltDiffusionImgaImgPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase)
pipe.set_progress_bar_config(disable=__UpperCamelCase)
pipe.enable_attention_slicing()
_UpperCAmelCase : int = '''A fantasy landscape, trending on artstation'''
_UpperCAmelCase : Union[str, Any] = torch.manual_seed(0)
_UpperCAmelCase : Optional[int] = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__UpperCamelCase , output_type='''np''' , )
_UpperCAmelCase : str = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1e-2
| 702 |
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        """Zero capacity and zero-value items yield zero."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: taking items of weight 1 and 2 gives value 3 + 2 = 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic instance: items 2 and 3 give 100 + 120 = 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
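# The 0/1 knapsack recurrence these tests exercise (DP sketch; K[i][w] = best value
# using the first i items within capacity w):
#   K[i][w] = K[i-1][w]                                       if wt[i-1] > w
#   K[i][w] = max(K[i-1][w], val[i-1] + K[i-1][w - wt[i-1]])  otherwise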
| 186 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = "".join(_lowerCamelCase ).replace(_lowerCamelCase , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , "wb" ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
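
# --- Illustrative sketch (not part of the original file) ----------------------
# The fairseq/spm alignment documented above reduces to shifting every
# SentencePiece id by `fairseq_offset` (1) so the four control tokens keep
# their fairseq positions 0-3. A minimal, self-contained demonstration:
fairseq_tokens_to_ids_demo = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset_demo = 1
spm_id_of_comma = 3  # "," sits at position 3 in the spm vocab (see table above)
assert spm_id_of_comma + fairseq_offset_demo == 4  # position 4 in the fairseq vocab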
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A =logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = ['input_values', 'attention_mask']
def __init__( self , lowercase = 1 , lowercase = 16000 , lowercase = 0.0 , lowercase = False , lowercase = 80 , lowercase = 16 , lowercase = 64 , lowercase = "hann_window" , lowercase = 1.0 , lowercase = 80 , lowercase = 7600 , lowercase = 1e-10 , lowercase = 2 , lowercase = True , **lowercase , ) -> int:
super().__init__(feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , **lowercase )
lowerCamelCase_ = do_normalize
lowerCamelCase_ = return_attention_mask
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = hop_length
lowerCamelCase_ = win_length
lowerCamelCase_ = win_function
lowerCamelCase_ = frame_signal_scale
lowerCamelCase_ = fmin
lowerCamelCase_ = fmax
lowerCamelCase_ = mel_floor
lowerCamelCase_ = reduction_factor
lowerCamelCase_ = win_length * sampling_rate // 1000
lowerCamelCase_ = hop_length * sampling_rate // 1000
lowerCamelCase_ = optimal_fft_length(self.sample_size )
lowerCamelCase_ = (self.n_fft // 2) + 1
lowerCamelCase_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase )
lowerCamelCase_ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , lowercase , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , lowercase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE_( lowercase , lowercase , lowercase = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
lowerCamelCase_ = np.array(lowercase , np.intaa )
lowerCamelCase_ = []
for vector, length in zip(lowercase , attention_mask.sum(-1 ) ):
lowerCamelCase_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCamelCase_ = padding_value
normed_input_values.append(lowercase )
else:
lowerCamelCase_ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def SCREAMING_SNAKE_CASE_( self , lowercase , ) -> np.ndarray:
lowerCamelCase_ = spectrogram(
lowercase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , lowercase = None , lowercase = None , lowercase = False , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
lowerCamelCase_ = self._process_audio(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , )
else:
lowerCamelCase_ = None
if audio_target is not None:
lowerCamelCase_ = self._process_audio(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase , )
if inputs is None:
return inputs_target
else:
lowerCamelCase_ = inputs_target["input_values"]
lowerCamelCase_ = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCamelCase_ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = False , lowercase = False , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchFeature:
lowerCamelCase_ = isinstance(lowercase , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase_ = is_batched_numpy or (
isinstance(lowercase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase_ = [np.asarray(lowercase , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowercase , np.ndarray ):
lowerCamelCase_ = np.asarray(lowercase , dtype=np.floataa )
elif isinstance(lowercase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowerCamelCase_ = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase_ = [speech]
# needed to make pad() work on spectrogram inputs
lowerCamelCase_ = self.feature_size
# convert into correct format for padding
if is_target:
lowerCamelCase_ = [self._extract_mel_features(lowercase ) for waveform in speech]
lowerCamelCase_ = BatchFeature({"input_values": features} )
lowerCamelCase_ = self.num_mel_bins
else:
lowerCamelCase_ = BatchFeature({"input_values": speech} )
lowerCamelCase_ = self.pad(
lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , **lowercase , )
lowerCamelCase_ = feature_size_hack
# convert input values to correct format
lowerCamelCase_ = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
lowerCamelCase_ = [np.asarray(lowercase , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowercase , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowerCamelCase_ = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowercase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowerCamelCase_ = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowerCamelCase_ = padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowerCamelCase_ = [np.asarray(lowercase , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowerCamelCase_ = (
attention_mask
if self._get_padding_strategies(lowercase , max_length=lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCamelCase_ = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=lowercase , padding_value=self.padding_value )
if return_tensors is not None:
lowerCamelCase_ = padded_inputs.convert_to_tensors(lowercase )
return padded_inputs
def SCREAMING_SNAKE_CASE_( self ) -> Dict[str, Any]:
lowerCamelCase_ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowerCamelCase_ = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
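
# --- Illustrative sketch (not part of the original file) ----------------------
# Per-utterance normalization as in `zero_mean_unit_var_norm` above: only the
# first `length` samples (the unpadded region given by the attention mask)
# contribute to the statistics, and padded positions are overwritten with
# `padding_value`. Reuses the `numpy` import from the top of the file.
def _demo_zero_mean_unit_var(vector, length, padding_value=0.0):
    normed = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
    normed[length:] = padding_value  # keep padding out of downstream statistics
    return normed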
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: Optional[Any]=2 , lowercase_: Union[str, Any]=3 , lowercase_: int=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> int:
def get_dataset(lowercase_: Optional[int] ):
A__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
A__ : Dict = get_dataset(lowercase_ )
A__ : Any = get_dataset(lowercase_ )
A__ : Dict = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
A__ : Optional[Any] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[str] , lowercase_: int , lowercase_: int , lowercase_: List[str] , lowercase_: Dict=None ) -> List[Any]:
A__ : List[Any] = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
A__ , A__ : Any = batch
A__ : Any = model(lowercase_ )
A__ : Any = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
        # Save 11 states; with total_limit=2 only the two most recent are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
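
# --- Illustrative sketch (not part of the original file) ----------------------
# The pattern the tests above exercise, in its smallest form: prepare objects,
# save a checkpoint, and restore it. The directory argument is hypothetical;
# `DummyModel`, `set_seed`, `Accelerator` and `torch` come from this file.
def _demo_checkpoint_round_trip(checkpoint_dir):
    set_seed(42)
    model = DummyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    accelerator = Accelerator()
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state(checkpoint_dir)  # writes model, optimizer and RNG states
    accelerator.load_state(checkpoint_dir)  # restores them in place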
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*lowercase_: Optional[int] , lowercase_: Optional[Union[Dict, Any]] = None , lowercase_: Dict=True , lowercase_: Tuple=2 ) -> Dict:
from .. import __version__
A__ : Dict = take_from
A__ : str = ()
if not isinstance(args[0] , lowercase_ ):
A__ : int = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase_ ).base_version ) >= version.parse(lowercase_ ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
A__ : Any = None
if isinstance(lowercase_ , lowercase_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase_ ),)
A__ : List[str] = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(lowercase_ , lowercase_ ):
values += (getattr(lowercase_ , lowercase_ ),)
A__ : Optional[Any] = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
A__ : int = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
A__ : int = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , lowercase_ , stacklevel=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) > 0:
A__ : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
A__ : Optional[Any] = call_frame.filename
A__ : Optional[int] = call_frame.lineno
A__ : Any = call_frame.function
A__ , A__ : List[str] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(lowercase_ ) == 0:
return
elif len(lowercase_ ) == 1:
return values[0]
return values
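
# --- Illustrative sketch (not part of the original file) ----------------------
# The helper above gates each deprecation on the package version: once the
# installed version reaches `version_name`, keeping the deprecation tuple is an
# error. The comparison uses `packaging.version` exactly as on the check above:
def _demo_version_gate(current_version, removal_version):
    # True means the deprecated code path should already have been removed
    return version.parse(version.parse(current_version).base_version) >= version.parse(removal_version)


assert _demo_version_gate("0.18.0", "0.17.0") is True
assert _demo_version_gate("0.16.0", "0.17.0") is False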
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCamelCase :
'''simple docstring'''
_A = None
def _UpperCAmelCase ( self : int ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
_a = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : int ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(SCREAMING_SNAKE_CASE_ , 'feat_extract.json' )
feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE_ )
_a = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCAmelCase ( self : int ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE_ )[0]
check_json_file_has_correct_format(SCREAMING_SNAKE_CASE_ )
_a = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCAmelCase ( self : int ):
_a = self.feature_extraction_class()
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
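
# --- Illustrative sketch (not part of the original file) ----------------------
# The save/load tests above follow the usual JSON round-trip pattern: serialize
# a config dict to disk, read it back, and compare. A library-agnostic version
# of the same idea (the file name is arbitrary):
def _demo_json_round_trip(config):
    with tempfile.TemporaryDirectory() as tmpdirname:
        path = os.path.join(tmpdirname, "feat_extract.json")
        with open(path, "w") as f:
            json.dump(config, f)
        with open(path) as f:
            return json.load(f) == config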
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    # Choose a random pivot from the list
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    # Return the kth smallest element of lst (1-indexed).
    # Note: duplicates of the pivot are dropped by the partition below,
    # so this variant assumes distinct elements.
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
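    # Illustrative check (not part of the original): the 3rd smallest element
    # of [2, 1, 3, 4, 5] is 3. Distinct elements are assumed, since this
    # quickselect drops duplicates of the pivot.
    assert kth_number([2, 1, 3, 4, 5], 3) == 3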
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    # Sieve of Eratosthenes over the odd numbers, seeded with 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over primes p dividing n
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'{solution() = }')
'''simple docstring'''
def hex_to_bin(hex_num: str) -> int:
    # Note: the function name is a reasonable reconstruction; the original
    # identifier was mangled in this dump.
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
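    # Illustrative checks (not part of the original): the binary digits are
    # returned packed into an int, e.g. 0xAC -> 10101100.
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-0x10") == -10000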
def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
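

# Illustrative check (not part of the original): pancake sort uses only prefix
# reversals, first flipping the current maximum to the front and then flipping
# it to the back of the unsorted region.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]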
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Tuple ) -> Optional[Any]:
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : Any = SamImageProcessor()
__snake_case : Optional[int] = SamProcessor(lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : Optional[Any] , **lowerCamelCase : Optional[int] ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor
def __snake_case ( self : Optional[Any] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : int ) -> List[Any]:
__snake_case : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : List[Any] ) -> Dict:
__snake_case : int = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Dict = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__snake_case : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __snake_case ( self : List[str] ) -> Tuple:
__snake_case : int = self.get_image_processor()
__snake_case : str = SamProcessor(image_processor=lowerCamelCase )
__snake_case : Optional[int] = self.prepare_image_inputs()
__snake_case : List[str] = image_processor(lowerCamelCase , return_tensors="np" )
__snake_case : Dict = processor(images=lowerCamelCase , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def __snake_case ( self : Optional[Any] ) -> Dict:
__snake_case : Tuple = self.get_image_processor()
__snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
__snake_case : List[str] = [torch.ones((1, 3, 5, 5) )]
__snake_case : Tuple = [[1764, 2646]]
__snake_case : Optional[int] = [[683, 1024]]
__snake_case : int = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__snake_case : Optional[Any] = processor.post_process_masks(
lowerCamelCase , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__snake_case : List[str] = [np.ones((1, 3, 5, 5) )]
__snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__snake_case : str = [[1, 0], [0, 1]]
with self.assertRaises(lowerCamelCase ):
__snake_case : Optional[int] = processor.post_process_masks(lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) )
@require_vision
@require_tf
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : List[Any] ) -> Union[str, Any]:
__snake_case : int = tempfile.mkdtemp()
__snake_case : str = SamImageProcessor()
__snake_case : List[Any] = SamProcessor(lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : str , **lowerCamelCase : Any ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor
def __snake_case ( self : Optional[int] ) -> Any:
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : str ) -> List[Any]:
__snake_case : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : Dict = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : int ) -> List[str]:
__snake_case : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__snake_case : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __snake_case ( self : Union[str, Any] ) -> List[Any]:
__snake_case : str = self.get_image_processor()
__snake_case : Union[str, Any] = SamProcessor(image_processor=lowerCamelCase )
__snake_case : Dict = self.prepare_image_inputs()
__snake_case : int = image_processor(lowerCamelCase , return_tensors="np" )
__snake_case : List[str] = processor(images=lowerCamelCase , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def __snake_case ( self : Any ) -> Optional[int]:
__snake_case : List[str] = self.get_image_processor()
__snake_case : Dict = SamProcessor(image_processor=lowerCamelCase )
__snake_case : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
__snake_case : List[Any] = [[1764, 2646]]
__snake_case : Dict = [[683, 1024]]
__snake_case : List[str] = processor.post_process_masks(lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__snake_case : Optional[Any] = processor.post_process_masks(
lowerCamelCase , tf.convert_to_tensor(lowerCamelCase ) , tf.convert_to_tensor(lowerCamelCase ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__snake_case : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
__snake_case : List[str] = processor.post_process_masks(
lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__snake_case : Tuple = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__snake_case : Dict = processor.post_process_masks(
lowerCamelCase , np.array(lowerCamelCase ) , np.array(lowerCamelCase ) , return_tensors="tf" )
@require_vision
@require_torchvision
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : List[str] ) -> str:
__snake_case : Optional[int] = tempfile.mkdtemp()
__snake_case : str = SamImageProcessor()
__snake_case : List[Any] = SamProcessor(lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[str] , **lowerCamelCase : Any ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase ).image_processor
def __snake_case ( self : Optional[int] ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Optional[int] ) -> Optional[int]:
__snake_case : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __snake_case ( self : Union[str, Any] ) -> List[str]:
__snake_case : str = self.get_image_processor()
__snake_case : str = SamProcessor(image_processor=lowerCamelCase )
__snake_case : List[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__snake_case : Dict = [tf.convert_to_tensor(lowerCamelCase )]
__snake_case : List[Any] = [torch.tensor(lowerCamelCase )]
__snake_case : Optional[Any] = [[1764, 2646]]
__snake_case : Optional[int] = [[683, 1024]]
__snake_case : Union[str, Any] = processor.post_process_masks(
lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="tf" )
__snake_case : Dict = processor.post_process_masks(
lowerCamelCase , lowerCamelCase , lowerCamelCase , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __snake_case ( self : List[Any] ) -> List[str]:
__snake_case : Any = self.get_image_processor()
__snake_case : List[Any] = SamProcessor(image_processor=lowerCamelCase )
__snake_case : Dict = self.prepare_image_inputs()
__snake_case : Any = image_processor(lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
__snake_case : Optional[Any] = processor(images=lowerCamelCase , return_tensors="pt" )["pixel_values"].numpy()
__snake_case : Tuple = image_processor(lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
__snake_case : List[Any] = processor(images=lowerCamelCase , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase ) )
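
# --- Illustrative sketch (not part of the original file) ----------------------
# `post_process_masks` upsamples SAM's low-resolution mask logits back to each
# image's original (height, width). The shapes exercised by the tests above,
# gathered in one place; reuses `torch` and the processor classes imported at
# the top of this file.
def _demo_post_process_shapes():
    processor = SamProcessor(image_processor=SamImageProcessor())
    low_res_masks = [torch.ones((1, 3, 5, 5))]  # (batch, num_masks, h, w)
    masks = processor.post_process_masks(low_res_masks, [[1764, 2646]], [[683, 1024]])
    return masks[0].shape  # -> (1, 3, 1764, 2646)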
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowercase_ ( _lowerCamelCase: List[str] ) -> List[str]:
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _snake_case ( UpperCamelCase__ ):
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : Optional[int] = parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=UpperCAmelCase , default=UpperCAmelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=UpperCAmelCase , help="Name of the model to download" )
download_parser.set_defaults(func=UpperCAmelCase )
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple ):
__lowerCamelCase : int = model
__lowerCamelCase : str = cache
__lowerCamelCase : Dict = force
__lowerCamelCase : int = trust_remote_code
def lowerCamelCase__ ( self : List[Any] ):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")

        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # YOLO labels store (class, x_center, y_center, width, height);
            # convert to corner coordinates
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
    print('''DONE ✅''')
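
# --- Illustrative sketch (not part of the original script) --------------------
# The script converts between YOLO's normalized (x_center, y_center, width,
# height) label format and corner coordinates (xmin, ymin, xmax, ymax). The
# round trip in isolation, returning the (xc, yc, w, h) it started from:
def _demo_yolo_round_trip(xc, yc, w, h):
    xmin, ymin = xc - w / 2, yc - h / 2  # center/size -> corners
    xmax, ymax = xc + w / 2, yc + h / 2
    return (xmin + xmax) / 2, (ymin + ymax) / 2, xmax - xmin, ymax - ymin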
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase__ ( A_: str , A_: int=0.9_9_9 , A_: List[str]="cosine" , ) -> Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A_: Tuple ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A_: int ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__UpperCAmelCase =[]
for i in range(A_ ):
__UpperCAmelCase =i / num_diffusion_timesteps
__UpperCAmelCase =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A_ ) / alpha_bar_fn(A_ ) , A_ ) )
return torch.tensor(A_ , dtype=torch.floataa )
class _A ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
lowerCamelCase : Dict = 2
@register_to_config
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int = 1000 , __SCREAMING_SNAKE_CASE : float = 0.00_085 , __SCREAMING_SNAKE_CASE : float = 0.012 , __SCREAMING_SNAKE_CASE : str = "linear" , __SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , __SCREAMING_SNAKE_CASE : str = "epsilon" , __SCREAMING_SNAKE_CASE : str = "linspace" , __SCREAMING_SNAKE_CASE : int = 0 , ) -> Optional[Any]:
if trained_betas is not None:
__UpperCAmelCase =torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
elif beta_schedule == "linear":
__UpperCAmelCase =torch.linspace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__UpperCAmelCase =(
torch.linspace(beta_start**0.5 , beta_end**0.5 , __SCREAMING_SNAKE_CASE , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__UpperCAmelCase =betas_for_alpha_bar(__SCREAMING_SNAKE_CASE )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__UpperCAmelCase =1.0 - self.betas
__UpperCAmelCase =torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str=None ) -> Optional[int]:
if schedule_timesteps is None:
__UpperCAmelCase =self.timesteps
__UpperCAmelCase =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__UpperCAmelCase =1 if len(__SCREAMING_SNAKE_CASE ) > 1 else 0
else:
__UpperCAmelCase =timestep.cpu().item() if torch.is_tensor(__SCREAMING_SNAKE_CASE ) else timestep
__UpperCAmelCase =self._index_counter[timestep_int]
return indices[pos].item()
@property
def _a ( self : List[str] ) -> Any:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _a ( self : int , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
__UpperCAmelCase =self.index_for_timestep(__SCREAMING_SNAKE_CASE )
if self.state_in_first_order:
__UpperCAmelCase =self.sigmas[step_index]
else:
__UpperCAmelCase =self.sigmas_interpol[step_index]
__UpperCAmelCase =sample / ((sigma**2 + 1) ** 0.5)
return sample
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> Optional[Any]:
__UpperCAmelCase =num_inference_steps
__UpperCAmelCase =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__UpperCAmelCase =np.linspace(0 , num_train_timesteps - 1 , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__UpperCAmelCase =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase =(np.arange(0 , __SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1].copy().astype(__SCREAMING_SNAKE_CASE )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__UpperCAmelCase =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase =(np.arange(__SCREAMING_SNAKE_CASE , 0 , -step_ratio )).round().copy().astype(__SCREAMING_SNAKE_CASE )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__UpperCAmelCase =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__UpperCAmelCase =torch.from_numpy(np.log(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.interp(__SCREAMING_SNAKE_CASE , np.arange(0 , len(__SCREAMING_SNAKE_CASE ) ) , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__UpperCAmelCase =torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(device=__SCREAMING_SNAKE_CASE )
# interpolate sigmas
__UpperCAmelCase =sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__UpperCAmelCase =torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__UpperCAmelCase =torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
# mps does not support float64
__UpperCAmelCase =torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
else:
__UpperCAmelCase =torch.from_numpy(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
# interpolate timesteps
__UpperCAmelCase =self.sigma_to_t(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE , dtype=timesteps.dtype )
__UpperCAmelCase =torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__UpperCAmelCase =torch.cat([timesteps[:1], interleaved_timesteps] )
__UpperCAmelCase =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__UpperCAmelCase =defaultdict(__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
# get log sigma
__UpperCAmelCase =sigma.log()
# get distribution
__UpperCAmelCase =log_sigma - self.log_sigmas[:, None]
# get sigmas range
__UpperCAmelCase =dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__UpperCAmelCase =low_idx + 1
__UpperCAmelCase =self.log_sigmas[low_idx]
__UpperCAmelCase =self.log_sigmas[high_idx]
# interpolate sigmas
__UpperCAmelCase =(low - log_sigma) / (low - high)
__UpperCAmelCase =w.clamp(0 , 1 )
# transform interpolation to time range
__UpperCAmelCase =(1 - w) * low_idx + w * high_idx
__UpperCAmelCase =t.view(sigma.shape )
return t
@property
def _a ( self : str ) -> Optional[Any]:
return self.sample is None
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , __SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , __SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , __SCREAMING_SNAKE_CASE : bool = True , ) -> Union[SchedulerOutput, Tuple]:
__UpperCAmelCase =self.index_for_timestep(__SCREAMING_SNAKE_CASE )
# advance index counter by 1
__UpperCAmelCase =timestep.cpu().item() if torch.is_tensor(__SCREAMING_SNAKE_CASE ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__UpperCAmelCase =self.sigmas[step_index]
__UpperCAmelCase =self.sigmas_interpol[step_index + 1]
__UpperCAmelCase =self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__UpperCAmelCase =self.sigmas[step_index - 1]
__UpperCAmelCase =self.sigmas_interpol[step_index]
__UpperCAmelCase =self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__UpperCAmelCase =0
__UpperCAmelCase =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__UpperCAmelCase =sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCAmelCase =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase =sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCAmelCase =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__UpperCAmelCase =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__UpperCAmelCase =sigma_interpol - sigma_hat
# store for 2nd order step
__UpperCAmelCase =sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__UpperCAmelCase =(sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__UpperCAmelCase =sigma_next - sigma_hat
__UpperCAmelCase =self.sample
__UpperCAmelCase =None
__UpperCAmelCase =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__UpperCAmelCase =self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__SCREAMING_SNAKE_CASE ):
# mps does not support float64
__UpperCAmelCase =self.timesteps.to(original_samples.device , dtype=torch.floataa )
__UpperCAmelCase =timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__UpperCAmelCase =self.timesteps.to(original_samples.device )
__UpperCAmelCase =timesteps.to(original_samples.device )
__UpperCAmelCase =[self.index_for_timestep(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for t in timesteps]
__UpperCAmelCase =sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__UpperCAmelCase =sigma.unsqueeze(-1 )
__UpperCAmelCase =original_samples + noise * sigma
return noisy_samples
def __len__( self : int ) -> List[str]:
return self.config.num_train_timesteps
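
# --- Illustrative sketch (not part of the original file) ----------------------
# The "squaredcos_cap_v2" branch above derives each beta from the cosine
# alpha-bar curve: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at
# max_beta. A standalone version for a handful of timesteps (pure `math`):
def _demo_cosine_betas(num_steps=4, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]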
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = SwinConfig(image_size=192 )
if "base" in model_name:
_UpperCAmelCase : Tuple = 6
_UpperCAmelCase : Optional[Any] = 128
_UpperCAmelCase : Dict = (2, 2, 18, 2)
_UpperCAmelCase : List[Any] = (4, 8, 16, 32)
elif "large" in model_name:
_UpperCAmelCase : int = 12
_UpperCAmelCase : Optional[int] = 192
_UpperCAmelCase : Optional[Any] = (2, 2, 18, 2)
_UpperCAmelCase : str = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
_UpperCAmelCase : Optional[int] = window_size
_UpperCAmelCase : Optional[int] = embed_dim
_UpperCAmelCase : List[Any] = depths
_UpperCAmelCase : Any = num_heads
return config
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict ) -> str:
'''simple docstring'''
if "encoder.mask_token" in name:
_UpperCAmelCase : Dict = name.replace("encoder.mask_token" , "embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
_UpperCAmelCase : Optional[int] = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
_UpperCAmelCase : Any = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
if "attn.proj" in name:
_UpperCAmelCase : int = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_UpperCAmelCase : Dict = name.replace("attn" , "attention.self" )
if "norm1" in name:
_UpperCAmelCase : List[str] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_UpperCAmelCase : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_UpperCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_UpperCAmelCase : Any = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
_UpperCAmelCase : Any = "layernorm.weight"
if name == "encoder.norm.bias":
_UpperCAmelCase : List[Any] = "layernorm.bias"
if "decoder" in name:
pass
else:
_UpperCAmelCase : Dict = "swin." + name
return name
def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase : Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
_UpperCAmelCase : int = key.split("." )
_UpperCAmelCase : List[str] = int(key_split[2] )
_UpperCAmelCase : Dict = int(key_split[4] )
_UpperCAmelCase : str = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase : Optional[Any] = val[:dim, :]
_UpperCAmelCase : List[Any] = val[
dim : dim * 2, :
]
_UpperCAmelCase : Optional[Any] = val[-dim:, :]
else:
_UpperCAmelCase : Optional[int] = val[
:dim
]
_UpperCAmelCase : Dict = val[
dim : dim * 2
]
_UpperCAmelCase : Union[str, Any] = val[
-dim:
]
else:
_UpperCAmelCase : List[Any] = val
return orig_state_dict
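# Hedged sketch (illustration only): the "qkv" branch above slices a fused
# attention projection of shape (3 * dim, dim) into separate query, key and
# value weights. A minimal standalone version, with hypothetical names:
import torch

def _split_qkv_sketch(fused_weight: torch.Tensor, dim: int):
    query = fused_weight[:dim, :]
    key = fused_weight[dim : dim * 2, :]
    value = fused_weight[-dim:, :]
    return query, key, value

# _split_qkv_sketch(torch.randn(3 * 8, 8), dim=8) returns three (8, 8) tensors.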
def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = torch.load(SCREAMING_SNAKE_CASE__ , map_location="cpu" )["model"]
_UpperCAmelCase : Union[str, Any] = get_swin_config(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : str = SwinForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
model.eval()
_UpperCAmelCase : Optional[Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : List[Any] = ViTImageProcessor(size={"height": 192, "width": 192} )
_UpperCAmelCase : int = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
_UpperCAmelCase : str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**SCREAMING_SNAKE_CASE__ ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCAmelCase : str = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
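# Hedged usage note (illustration only): given the arguments defined above, a
# typical invocation would look something like the following. The script file
# name and local paths are assumptions, not taken from the source.
#
#   python convert_swin_simmim_checkpoint.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim-converted \
#       --push_to_hub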
| 289 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , lowerCAmelCase_ : int , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = 1_3
lowercase_ = 7
lowercase_ = 3_0
lowercase_ = self.seq_length + self.mem_len
lowercase_ = 1_5
lowercase_ = True
lowercase_ = True
lowercase_ = 9_9
lowercase_ = [1_0, 5_0, 8_0]
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 1_2_8
lowercase_ = 2
lowercase_ = 2
lowercase_ = None
lowercase_ = 1
lowercase_ = 0
lowercase_ = 3
lowercase_ = self.vocab_size - 1
lowercase_ = 0.01
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
random.seed(self.seed)
tf.random.set_seed(self.seed)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = TFTransfoXLModel(lowerCAmelCase_)
lowercase_ , lowercase_ = model(lowerCAmelCase_).to_tuple()
lowercase_ = {"""input_ids""": input_ids_a, """mems""": mems_a}
lowercase_ , lowercase_ = model(lowerCAmelCase_).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = TFTransfoXLLMHeadModel(lowerCAmelCase_)
lowercase_ , lowercase_ = model(lowerCAmelCase_).to_tuple()
lowercase_ = {"""input_ids""": input_ids_a, """labels""": lm_labels}
lowercase_ , lowercase_ = model(lowerCAmelCase_).to_tuple()
lowercase_ , lowercase_ = model([input_ids_a, mems_a]).to_tuple()
lowercase_ = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
lowercase_ , lowercase_ = model(lowerCAmelCase_).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
lowercase_ = TFTransfoXLForSequenceClassification(lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
((lowercase_) , (lowercase_) , (lowercase_) , (lowercase_)) = config_and_inputs
lowercase_ = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase__ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowercase__ = () if is_tf_available() else ()
lowercase__ = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = TFTransfoXLModelTester(self)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_ , d_embed=3_7)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : str):
"""simple docstring"""
self.model_tester.set_seed()
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
self.model_tester.set_seed()
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
pass
@slow
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = TFTransfoXLModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""")
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""")
@slow
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""")
# fmt: off
lowercase_ = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase_ = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase_ = model.generate(lowerCAmelCase_ , max_length=2_0_0 , do_sample=lowerCAmelCase_)
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase_)
| 100 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> list[list[int]]:
'''simple docstring'''
lowercase_ = []
if len(__lowerCAmelCase ) == 1:
return [nums.copy()]
for _ in range(len(__lowerCAmelCase ) ):
lowercase_ = nums.pop(0 )
lowercase_ = permute(__lowerCAmelCase )
for perm in permutations:
perm.append(__lowerCAmelCase )
result.extend(__lowerCAmelCase )
nums.append(__lowerCAmelCase )
return result
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple:
'''simple docstring'''
def backtrack(__lowerCAmelCase ):
if start == len(__lowerCAmelCase ) - 1:
output.append(nums[:] )
else:
for i in range(__lowerCAmelCase , len(__lowerCAmelCase ) ):
lowercase_ , lowercase_ = nums[i], nums[start]
backtrack(start + 1 )
lowercase_ , lowercase_ = nums[i], nums[start] # backtrack
lowercase_ = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
UpperCAmelCase : Optional[Any] = permutea([1, 2, 3])
print(res)
doctest.testmod()
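# Hedged reference sketch (illustration only): cleaned-up versions of the two
# strategies above -- recursive rotation and in-place backtracking -- with
# readable names. These mirror the logic shown, not any library API.
def permute_recursive(nums: list[int]) -> list[list[int]]:
    if len(nums) == 1:
        return [nums.copy()]
    result = []
    for _ in range(len(nums)):
        head = nums.pop(0)
        for perm in permute_recursive(nums):
            perm.append(head)
            result.append(perm)
        nums.append(head)
    return result

def permute_backtrack(nums: list[int]) -> list[list[int]]:
    output: list[list[int]] = []

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # undo the swap

    backtrack(0)
    return output

# Both permute_recursive([1, 2, 3]) and permute_backtrack([1, 2, 3]) yield all
# six permutations of [1, 2, 3], in different orders.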
| 100 | 1 |
import baseaa
def _lowerCamelCase ( __lowerCamelCase ) -> bytes:
'''simple docstring'''
    return baseaa.baaencode(__lowerCamelCase.encode("""utf-8""" ) )
def _lowerCamelCase ( __lowerCamelCase ) -> str:
'''simple docstring'''
return baseaa.baadecode(__lowerCamelCase ).decode("""utf-8""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = """Hello World!"""
SCREAMING_SNAKE_CASE__ : int = baseaa_encode(test)
print(encoded)
SCREAMING_SNAKE_CASE__ : Dict = baseaa_decode(encoded)
print(decoded)
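# Hedged sketch (illustration only): assuming the obfuscated baseaa.baaencode /
# baseaa.baadecode above stand for base64.b64encode / base64.b64decode, the
# plain standard-library round-trip looks like this:
import base64

def b64_roundtrip(text: str) -> str:
    encoded: bytes = base64.b64encode(text.encode("utf-8"))
    return base64.b64decode(encoded).decode("utf-8")

assert b64_roundtrip("Hello World!") == "Hello World!"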
| 79 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class A__:
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.0_2 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> Tuple:
a_ : Union[str, Any] = parent
a_ : int = batch_size
a_ : int = seq_length
a_ : int = is_training
a_ : List[Any] = use_input_mask
a_ : str = use_token_type_ids
a_ : List[str] = use_labels
a_ : List[Any] = vocab_size
a_ : Dict = hidden_size
a_ : List[Any] = num_hidden_layers
a_ : str = num_attention_heads
a_ : str = intermediate_size
a_ : Optional[int] = hidden_act
a_ : Optional[int] = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : List[str] = max_position_embeddings
a_ : Optional[Any] = type_vocab_size
a_ : Any = type_sequence_label_size
a_ : List[Any] = initializer_range
a_ : List[str] = num_labels
a_ : Union[str, Any] = num_choices
a_ : Dict = scope
def UpperCamelCase__ ( self ) -> Any:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Union[str, Any] = None
if self.use_input_mask:
a_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
a_ : List[Any] = None
if self.use_token_type_ids:
a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : Union[str, Any] = None
a_ : Dict = None
a_ : Any = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : int = ids_tensor([self.batch_size] , self.num_choices )
a_ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> Any:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , use_stable_embedding=_lowercase , )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
a_ : Optional[Any] = OpenLlamaModel(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : List[Any] = model(_lowercase , attention_mask=_lowercase )
a_ : Optional[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Union[str, Any]:
a_ : int = True
a_ : Union[str, Any] = OpenLlamaModel(_lowercase )
model.to(_lowercase )
model.eval()
a_ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
a_ : Any = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , )
a_ : Optional[int] = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Any:
a_ : Dict = OpenLlamaForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : int = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[str]:
a_ : Dict = True
a_ : Optional[int] = True
a_ : Dict = OpenLlamaForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
# first forward pass
a_ : List[str] = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , use_cache=_lowercase , )
a_ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a_ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
a_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a_ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
a_ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
a_ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , output_hidden_states=_lowercase , )["""hidden_states"""][0]
a_ : Dict = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , past_key_values=_lowercase , output_hidden_states=_lowercase , )["""hidden_states"""][0]
# select random slice
a_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class A__(a_, a_, a_, unittest.TestCase ):
"""simple docstring"""
_A : Tuple = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_A : int = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_A : Optional[int] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : Any = False
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Optional[int] = OpenLlamaModelTester(self )
a_ : Optional[int] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Any:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCamelCase__ ( self ) -> Tuple:
a_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ : Dict = type
self.model_tester.create_and_check_model(*_lowercase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Any = 3
a_ : List[str] = input_dict["""input_ids"""]
a_ : List[str] = input_ids.ne(1 ).to(_lowercase )
a_ : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a_ : int = OpenLlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
a_ : Tuple = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Union[str, Any] = 3
a_ : List[Any] = """single_label_classification"""
a_ : Dict = input_dict["""input_ids"""]
a_ : int = input_ids.ne(1 ).to(_lowercase )
a_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a_ : Any = OpenLlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
a_ : Any = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) -> Tuple:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Any = 3
a_ : List[str] = """multi_label_classification"""
a_ : Dict = input_dict["""input_ids"""]
a_ : Any = input_ids.ne(1 ).to(_lowercase )
a_ : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a_ : Optional[int] = OpenLlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
a_ : int = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCamelCase__ ( self ) -> Tuple:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCamelCase__ ( self , _lowercase ) -> str:
        a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ : List[Any] = ids_tensor([1, 10] , config.vocab_size )
a_ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a_ : Union[str, Any] = OpenLlamaModel(_lowercase )
original_model.to(_lowercase )
original_model.eval()
a_ : Union[str, Any] = original_model(_lowercase ).last_hidden_state
a_ : str = original_model(_lowercase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a_ : int = {"""type""": scaling_type, """factor""": 1_0.0}
a_ : Union[str, Any] = OpenLlamaModel(_lowercase )
scaled_model.to(_lowercase )
scaled_model.eval()
a_ : Optional[int] = scaled_model(_lowercase ).last_hidden_state
a_ : Tuple = scaled_model(_lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_lowercase , _lowercase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowercase , _lowercase , atol=1e-5 ) )
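# Hedged sketch (illustration only) of the "linear" rope_scaling variant
# exercised above: position indices are divided by the scaling factor before
# the rotary frequencies are computed, stretching the usable context window.
# The function name is an assumption, not a transformers API.
import torch

def _linear_scaled_positions(seq_len: int, factor: float) -> torch.Tensor:
    return torch.arange(seq_len, dtype=torch.float32) / factor

# _linear_scaled_positions(8, 2.0) -> tensor([0.0, 0.5, 1.0, ..., 3.5])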
| 540 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_UpperCAmelCase : List[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
_UpperCAmelCase : Dict = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
_UpperCAmelCase : Dict = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_UpperCAmelCase : str = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
_UpperCAmelCase : Dict = """allenai"""
def __magic_name__( lowerCamelCase):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
__lowerCAmelCase = dict((re.sub(r'''@@$''', '''''', lowerCamelCase), v) if k.endswith('''@@''') else (re.sub(r'''$''', '''</w>''', lowerCamelCase), v) for k, v in d.items())
__lowerCAmelCase = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
__lowerCAmelCase = d[k] # restore
return da
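# Hedged mini-demo (illustration only) of the rewrite described above: '@@'
# marks a word continuation and is stripped; other keys gain a '</w>'
# word-ending marker; the four special tokens are restored unchanged.
import re

def rewrite_dict_keys_sketch(d: dict) -> dict:
    out = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else re.sub(r"$", "</w>", k)): v
        for k, v in d.items()
    }
    for tok in "<s> <pad> </s> <unk>".split():
        if tok in d:
            out.pop(f"{tok}</w>", None)
            out[tok] = d[tok]
    return out

# rewrite_dict_keys_sketch({"le@@": 5, "tt@@": 6, "er": 7})
# -> {"le": 5, "tt": 6, "er</w>": 7}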
def __magic_name__( lowerCamelCase, lowerCamelCase):
# prep
assert os.path.exists(lowerCamelCase)
os.makedirs(lowerCamelCase, exist_ok=lowerCamelCase)
print(F"""Writing results to {pytorch_dump_folder_path}""")
# handle various types of models
__lowerCAmelCase = basename(lowerCamelCase)
__lowerCAmelCase = dirname(lowerCamelCase)
__lowerCAmelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
__lowerCAmelCase = cls.hub_models()
__lowerCAmelCase = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
__lowerCAmelCase = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""")
__lowerCAmelCase = hub_utils.from_pretrained(
lowerCamelCase, lowerCamelCase, lowerCamelCase, archive_map=lowerCamelCase, **lowerCamelCase)
__lowerCAmelCase = vars(chkpt['''args''']['''model'''])
__lowerCAmelCase = args['''source_lang''']
__lowerCAmelCase = args['''target_lang''']
__lowerCAmelCase = dirname(lowerCamelCase)
__lowerCAmelCase = basename(lowerCamelCase)
# dicts
__lowerCAmelCase = os.path.join(lowerCamelCase, F"""dict.{src_lang}.txt""")
__lowerCAmelCase = os.path.join(lowerCamelCase, F"""dict.{tgt_lang}.txt""")
__lowerCAmelCase = Dictionary.load(lowerCamelCase)
__lowerCAmelCase = rewrite_dict_keys(src_dict.indices)
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = os.path.join(lowerCamelCase, '''vocab-src.json''')
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""")
with open(lowerCamelCase, '''w''', encoding='''utf-8''') as f:
f.write(json.dumps(lowerCamelCase, ensure_ascii=lowerCamelCase, indent=lowerCamelCase))
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
__lowerCAmelCase = True
for k in src_vocab.keys():
if not k.islower():
__lowerCAmelCase = False
break
__lowerCAmelCase = Dictionary.load(lowerCamelCase)
__lowerCAmelCase = rewrite_dict_keys(tgt_dict.indices)
__lowerCAmelCase = len(lowerCamelCase)
__lowerCAmelCase = os.path.join(lowerCamelCase, '''vocab-tgt.json''')
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""")
with open(lowerCamelCase, '''w''', encoding='''utf-8''') as f:
f.write(json.dumps(lowerCamelCase, ensure_ascii=lowerCamelCase, indent=lowerCamelCase))
# merges_file (bpecodes)
__lowerCAmelCase = os.path.join(lowerCamelCase, VOCAB_FILES_NAMES['''merges_file'''])
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
__lowerCAmelCase = os.path.join(lowerCamelCase, lowerCamelCase)
if os.path.exists(lowerCamelCase):
break
with open(lowerCamelCase, encoding='''utf-8''') as fin:
__lowerCAmelCase = fin.read()
__lowerCAmelCase = re.sub(r''' \d+$''', '''''', lowerCamelCase, 0, re.M) # remove frequency number
print(F"""Generating {merges_file}""")
with open(lowerCamelCase, '''w''', encoding='''utf-8''') as fout:
fout.write(lowerCamelCase)
# model config
__lowerCAmelCase = os.path.join(lowerCamelCase, '''config.json''')
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args["bpe"]}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args["tokenizer"]}"""
__lowerCAmelCase = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
__lowerCAmelCase = 5
__lowerCAmelCase = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
__lowerCAmelCase = best_score_hparams[model_dir]['''length_penalty''']
else:
__lowerCAmelCase = 1.0
print(F"""Generating {fsmt_model_config_file}""")
with open(lowerCamelCase, '''w''', encoding='''utf-8''') as f:
f.write(json.dumps(lowerCamelCase, ensure_ascii=lowerCamelCase, indent=lowerCamelCase))
# tokenizer config
__lowerCAmelCase = os.path.join(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 1_0_2_4,
'''do_lower_case''': do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""")
with open(lowerCamelCase, '''w''', encoding='''utf-8''') as f:
f.write(json.dumps(lowerCamelCase, ensure_ascii=lowerCamelCase, indent=lowerCamelCase))
# model
__lowerCAmelCase = chkpt['''models'''][0]
__lowerCAmelCase = model.state_dict()
# rename keys to start with 'model.'
__lowerCAmelCase = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items())
# remove unneeded keys
__lowerCAmelCase = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = FSMTConfig.from_pretrained(lowerCamelCase)
__lowerCAmelCase = FSMTForConditionalGeneration(lowerCamelCase)
# check that it loads ok
model_new.load_state_dict(lowerCamelCase, strict=lowerCamelCase)
# save
__lowerCAmelCase = os.path.join(lowerCamelCase, lowerCamelCase)
print(F"""Generating {pytorch_weights_dump_path}""")
torch.save(lowerCamelCase, lowerCamelCase)
print('''Conversion is done!''')
print('''\nLast step is to upload the files to s3''')
print(F"""cd {data_root}""")
print(F"""transformers-cli upload {model_dir}""")
if __name__ == "__main__":
_UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
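# Hedged usage note (illustration only): a typical invocation of this
# conversion, with an assumed script name and local paths:
#
#   python convert_fsmt_checkpoint.py \
#       --fsmt_checkpoint_path ./wmt19-en-ru/model.pt \
#       --pytorch_dump_folder_path ./fsmt-wmt19-en-ru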
| 720 |
'''simple docstring'''
from torch import nn
def __magic_name__( lowerCamelCase):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""")
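# Hedged usage sketch (illustration only): the factory above lets a block pick
# its activation from a config string. Names below are assumptions.
import torch
from torch import nn

act = nn.SiLU()  # what the factory returns for "swish" / "silu"
layer = nn.Sequential(nn.Linear(8, 8), act)
_ = layer(torch.randn(2, 8))  # activation applied elementwise to the output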
| 474 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class a__ ( __magic_name__ , __magic_name__ ):
lowercase_ = "convnextv2"
def __init__( self : Any , UpperCamelCase_ : str=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : int=1e-12 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : Dict=224 , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(**UpperCamelCase_)
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : Any = patch_size
__UpperCAmelCase : Dict = num_stages
__UpperCAmelCase : Any = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__UpperCAmelCase : List[Any] = [3, 3, 9, 3] if depths is None else depths
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
__UpperCAmelCase : List[str] = drop_path_rate
__UpperCAmelCase : List[Any] = image_size
__UpperCAmelCase : Optional[Any] = ["stem"] + [F"stage{idx}" for idx in range(1 , len(self.depths) + 1)]
        __UpperCAmelCase , __UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names)
| 77 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
A = False
class _a ( unittest.TestCase):
def __lowercase ( self : str ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowercase ( self : List[str] ) -> Any:
return 12
@property
def __lowercase ( self : List[str] ) -> Dict:
return 12
@property
def __lowercase ( self : Optional[int] ) -> Union[str, Any]:
return 32
@property
def __lowercase ( self : int ) -> Dict:
torch.manual_seed(0 )
snake_case : List[str] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __lowercase ( self : str ) -> List[Any]:
snake_case : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def __lowercase ( self : Union[str, Any] ) -> Any:
torch.manual_seed(0 )
snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_lowercase )
@property
def __lowercase ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
snake_case : List[Any] = 12
snake_case : Dict = 12
snake_case : Tuple = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
snake_case : Dict = TransformeraDModel(**_lowercase )
return model
def __lowercase ( self : Optional[int] ) -> Tuple:
snake_case : Optional[Any] = "cpu"
snake_case : Optional[int] = self.dummy_vqvae
snake_case : Dict = self.dummy_text_encoder
snake_case : Tuple = self.dummy_tokenizer
snake_case : List[Any] = self.dummy_transformer
snake_case : List[Any] = VQDiffusionScheduler(self.num_embed )
snake_case : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case : Dict = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Optional[Any] = "teddy bear playing in the pool"
snake_case : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" )
snake_case : Optional[int] = output.images
snake_case : int = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : List[str] = pipe(
[prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case : List[Any] = image[0, -3:, -3:, -1]
snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case : str = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
snake_case : List[str] = "cpu"
snake_case : Dict = self.dummy_vqvae
snake_case : List[Any] = self.dummy_text_encoder
snake_case : Optional[Any] = self.dummy_tokenizer
snake_case : int = self.dummy_transformer
snake_case : str = VQDiffusionScheduler(self.num_embed )
snake_case : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case : List[str] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case : Optional[Any] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Dict = "teddy bear playing in the pool"
snake_case : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Union[str, Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" )
snake_case : Optional[Any] = output.images
snake_case : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Union[str, Any] = pipe(
[prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case : Any = image[0, -3:, -3:, -1]
snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
def __lowercase ( self : Optional[int] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict ) -> Tuple:
snake_case : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
snake_case : Tuple = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
snake_case : Union[str, Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Optional[int] = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=_lowercase , output_type="np" , )
snake_case : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 449 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class a__ :
_a : Optional[Union[str, Path]] = None
_a : bool = False
_a : bool = False
_a : bool = False
_a : Optional[Dict] = None
_a : Optional[str] = None
_a : bool = False
_a : bool = False
_a : bool = False
_a : bool = True
_a : Optional[int] = None
_a : int = 1
_a : Optional[Union[str, bool]] = None
_a : bool = False
_a : Optional[Dict] = None
_a : Optional[str] = None
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(_A ) for k, v in self.__dict__.items()} )
| 552 |
def _a ( SCREAMING_SNAKE_CASE_ : list[int] ):
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if numbers[j] < numbers[i]:
__lowerCAmelCase , __lowerCAmelCase = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
UpperCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
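# Hedged reference sketch (illustration only): the loop above is a plain
# exchange sort -- O(n^2) comparisons, sorting in place -- with the obfuscated
# swap written out with readable names.
def exchange_sort_sketch(numbers: list[int]) -> list[int]:
    for i in range(len(numbers)):
        for j in range(i + 1, len(numbers)):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers

assert exchange_sort_sketch([4, 1, 3, 2]) == [1, 2, 3, 4]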
| 552 | 1 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
a__ : Any = 'src/transformers'
# Matches is_xxx_available()
a__ : Any = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
a__ : List[str] = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a__ : Optional[Any] = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
a__ : Dict = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
a__ : Any = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a__ : List[Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
a__ : Optional[int] = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
a__ : Tuple = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
a__ : Optional[Any] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
a__ : Dict = re.compile(R'^\s*try:')
# Catches a line with else:
a__ : Dict = re.compile(R'^\s*else:')
def __snake_case ( SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if _re_test_backend.search(SCREAMING_SNAKE_CASE_ ) is None:
return None
UpperCAmelCase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE_ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE_ )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
UpperCAmelCase = 0
while line_index < len(SCREAMING_SNAKE_CASE_ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE_ ).groups()[0]
            UpperCAmelCase = re.findall(R'''\[([^\]]+)\]''' , SCREAMING_SNAKE_CASE_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
UpperCAmelCase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
UpperCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
UpperCAmelCase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCAmelCase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
UpperCAmelCase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ) is not None:
UpperCAmelCase = _re_between_brackets.search(SCREAMING_SNAKE_CASE_ ).groups()[0].split(''', ''' )
UpperCAmelCase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE_ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE_ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE_ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE_ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase = []
while (
line_index < len(SCREAMING_SNAKE_CASE_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE_ ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_import.search(SCREAMING_SNAKE_CASE_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
"""simple docstring"""
def find_duplicates(SCREAMING_SNAKE_CASE_ : Any ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase = []
for key in import_dict_objects.keys():
UpperCAmelCase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
UpperCAmelCase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase = '''base imports''' if key == '''none''' else f"{key} backend"
errors.append(f"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT." )
return errors
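# Hedged mini-demo (illustration only): the find_duplicates helper above uses
# collections.Counter to flag any object registered more than once.
import collections as _demo_collections

def _find_duplicates_sketch(items):
    return [k for k, v in _demo_collections.Counter(items).items() if v > 1]

assert _find_duplicates_sketch(["BertModel", "BertModel", "GPT2Model"]) == ["BertModel"]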
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """simple docstring"""
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
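# --- Illustrative example (not part of the original script): calling
# analyze_results on toy dictionaries to show the kind of report it builds.
# The object names below are made up.
_example_errors = analyze_results(
    {"none": ["AutoModel", "AutoModel"], "torch": ["BertModel"]},
    {"none": ["AutoModel"], "torch": ["BertModel", "BertConfig"]},
)
# _example_errors ->
#   ["Duplicate _import_structure definitions for: ['AutoModel']",
#    "Differences for torch backend:",
#    "  BertConfig in TYPE_HINT but not in _import_structure."]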
| 51 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    '''simple docstring'''
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz", extract_compressed_file=True, )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
# Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    '''simple docstring'''
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)
# Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # fails if the model hangs on generate (e.g. a bad config was saved)
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 51 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__(self ):
'''simple docstring'''
return AutoConfig.from_pretrained(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
        student , *_ = create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def lowerCAmelCase__(self ):
'''simple docstring'''
        student , *_ = create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=1 , d=_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
        student , *_ = create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=1 , d=_lowercase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def lowerCAmelCase__(self ):
'''simple docstring'''
        student , *_ = create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def lowerCAmelCase__(self ):
'''simple docstring'''
with self.assertRaises(_lowercase ):
create_student_by_copying_alternating_layers(_lowercase , tempfile.mkdtemp() , e=_lowercase , d=_lowercase )
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        '''simple docstring'''
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
    def test_easy_case(self):
        '''simple docstring'''
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)
    def test_knapsack(self):
        '''simple docstring'''
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
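# --- Illustrative sketch (not part of the original test file): a recursive
# 0/1 knapsack consistent with the expectations above. The signature
# knapsack(capacity, weights, values, counter) is inferred from the tests
# and is an assumption about the module under test.
def knapsack(capacity, weights, values, counter):
    # Base case: no items left or no capacity remaining.
    if counter == 0 or capacity == 0:
        return 0
    # If the item is too heavy, it can only be skipped.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )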
| 63 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __snake_case : Any , __snake_case : int = 13 , __snake_case : int = 64 , __snake_case : int = 2 , __snake_case : int = 3 , __snake_case : int = 3 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = 128 , __snake_case : List[Any]=[16, 32, 64, 128] , __snake_case : int = 7 , __snake_case : int = 4 , __snake_case : int = 37 , __snake_case : str = "gelu" , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : int = 10 , __snake_case : float = 0.02 , __snake_case : int = 2 , __snake_case : int = 1 , __snake_case : int = 128 , __snake_case : List[int] = [2, 2, 2, 2] , __snake_case : int = 2 , __snake_case : int = 2 , ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = image_size
lowerCamelCase = patch_size
lowerCamelCase = num_channels
lowerCamelCase = is_training
lowerCamelCase = use_labels
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = encoder_stride
lowerCamelCase = num_attention_outputs
lowerCamelCase = embed_dim
lowerCamelCase = embed_dim + 1
lowerCamelCase = resolution
lowerCamelCase = depths
lowerCamelCase = hidden_sizes
lowerCamelCase = dim
lowerCamelCase = mlp_expansion_ratio
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCamelCase__ ( self : Any , __snake_case : Dict , __snake_case : List[Any] , __snake_case : List[str] ) -> str:
'''simple docstring'''
lowerCamelCase = TFEfficientFormerModel(config=__snake_case )
lowerCamelCase = model(__snake_case , training=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Dict , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Tuple ) -> Any:
'''simple docstring'''
lowerCamelCase = self.type_sequence_label_size
lowerCamelCase = TFEfficientFormerForImageClassification(__snake_case )
lowerCamelCase = model(__snake_case , labels=__snake_case , training=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase = 1
lowerCamelCase = TFEfficientFormerForImageClassification(__snake_case )
lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
snake_case = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
snake_case = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = TFEfficientFormerModelTester(self )
lowerCamelCase = ConfigTester(
self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(__snake_case )
lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase = [*signature.parameters.keys()]
lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case )
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
def check_hidden_states_output(__snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Optional[Any] ):
lowerCamelCase = model_class(__snake_case )
lowerCamelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) , training=__snake_case )
lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
if hasattr(self.model_tester , 'encoder_seq_length' ):
lowerCamelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
lowerCamelCase = seq_length * self.model_tester.chunk_length
else:
lowerCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCamelCase = outputs.decoder_hidden_states
                self.assertIsInstance(__snake_case , (list, tuple) )
self.assertEqual(len(__snake_case ) , __snake_case )
lowerCamelCase = getattr(self.model_tester , 'seq_length' , __snake_case )
lowerCamelCase = getattr(self.model_tester , 'decoder_seq_length' , __snake_case )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def lowerCamelCase__ ( self : Any , __snake_case : Tuple , __snake_case : int , __snake_case : Dict=False ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case )
def lowerCamelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = TFEfficientFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = True
lowerCamelCase = getattr(self.model_tester , 'seq_length' , __snake_case )
lowerCamelCase = getattr(self.model_tester , 'encoder_seq_length' , __snake_case )
lowerCamelCase = getattr(self.model_tester , 'key_length' , __snake_case )
lowerCamelCase = getattr(self.model_tester , 'chunk_length' , __snake_case )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
lowerCamelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCamelCase = True
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = model_class(__snake_case )
lowerCamelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) , training=__snake_case )
lowerCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase = True
lowerCamelCase = model_class(__snake_case )
lowerCamelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) , training=__snake_case )
lowerCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCamelCase = model_class(__snake_case )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCamelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__snake_case )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCamelCase = model(__snake_case )
self.assertTrue(outputs_dict is not None )
def a_ ( ) -> int:
"""simple docstring"""
lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : int ) -> Dict:
'''simple docstring'''
lowerCamelCase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
lowerCamelCase = self.default_image_processor
lowerCamelCase = prepare_img()
lowerCamelCase = image_processor(images=__snake_case , return_tensors='tf' )
# forward pass
lowerCamelCase = model(**__snake_case , training=__snake_case )
# verify the logits
lowerCamelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
@slow
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
lowerCamelCase = self.default_image_processor
lowerCamelCase = prepare_img()
lowerCamelCase = image_processor(images=__snake_case , return_tensors='tf' )
# forward pass
lowerCamelCase = model(**__snake_case , training=__snake_case )
# verify the logits
lowerCamelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
| 246 |
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    '''simple docstring'''
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breadth_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + F'''->{target_vertex}'''
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
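# --- Illustrative sketch (not part of the original file): the same lookup done
# iteratively by walking the parent map built by breadth_first_search, instead
# of the recursive shortest_path method above.
def shortest_path_iterative(parent: dict, source_vertex: str, target_vertex: str) -> str:
    path = [target_vertex]
    while path[-1] != source_vertex:
        previous = parent.get(path[-1])
        if previous is None:
            raise ValueError(f"No path from vertex: {source_vertex} to vertex: {target_vertex}")
        path.append(previous)
    return "->".join(reversed(path))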
| 246 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
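# --- Illustrative CLI usage (not part of the module); the config path below is
# a placeholder. Without --config_file, test_command launches the bundled
# test_script.py directly via accelerate-launch:
#   accelerate test --config_file /path/to/default_config.yaml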
| 708 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = '''markuplm'''
def __init__( self : int , UpperCAmelCase_ : List[str]=3_0522 , UpperCAmelCase_ : Optional[Any]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : Union[str, Any]=3072 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Dict=1E-12 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : str=256 , UpperCAmelCase_ : str=1024 , UpperCAmelCase_ : List[str]=216 , UpperCAmelCase_ : List[Any]=1001 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : Dict=50 , UpperCAmelCase_ : Optional[int]="absolute" , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : Any , ):
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE : Any = max_depth
SCREAMING_SNAKE_CASE : int = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE : List[str] = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = tag_pad_id
SCREAMING_SNAKE_CASE : Optional[Any] = subs_pad_id
SCREAMING_SNAKE_CASE : List[Any] = xpath_unit_hidden_size
| 488 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = original_name.split("." )[0]
UpperCAmelCase_ : List[str] = key.split("." )
UpperCAmelCase_ : Any = int(key_list[key_list.index(_SCREAMING_SNAKE_CASE ) - 2] )
UpperCAmelCase_ : Dict = int(key_list[key_list.index(_SCREAMING_SNAKE_CASE ) - 1] )
UpperCAmelCase_ : Optional[int] = orig_block_num - offset
UpperCAmelCase_ : Tuple = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = OrderedDict()
    total_embed_found , patch_emb_offset = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
UpperCAmelCase_ : Dict = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
UpperCAmelCase_ : List[str] = key[: key.find("proj" )]
UpperCAmelCase_ : List[Any] = key.replace(_SCREAMING_SNAKE_CASE , F'''patch_embeddings.{total_embed_found}.''' )
UpperCAmelCase_ : List[str] = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
UpperCAmelCase_ : str = "poolformer.encoder." + key
if "mlp.fc1" in key:
UpperCAmelCase_ : Dict = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
UpperCAmelCase_ : int = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
UpperCAmelCase_ : Optional[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "norm1" , "before_norm" )
if "norm2" in key:
UpperCAmelCase_ : Optional[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "norm2" , "after_norm" )
if "layer_scale_1" in key:
UpperCAmelCase_ : List[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
UpperCAmelCase_ : List[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
UpperCAmelCase_ : List[Any] = key.replace("head" , "classifier" )
UpperCAmelCase_ : Any = value
return new_state_dict
def a__ ( ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = PoolFormerConfig()
# set attributes based on model_name
UpperCAmelCase_ : Any = "huggingface/label-files"
UpperCAmelCase_ : str = model_name[-3:]
UpperCAmelCase_ : Union[str, Any] = 10_00
UpperCAmelCase_ : Union[str, Any] = "imagenet-1k-id2label.json"
UpperCAmelCase_ : Tuple = (1, 10_00)
# set config attributes
UpperCAmelCase_ : int = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase_ : str = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
if size == "s12":
UpperCAmelCase_ : Tuple = [2, 2, 6, 2]
UpperCAmelCase_ : int = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : int = 4.0
UpperCAmelCase_ : Optional[int] = 0.9
elif size == "s24":
UpperCAmelCase_ : str = [4, 4, 12, 4]
UpperCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : List[Any] = 4.0
UpperCAmelCase_ : Optional[Any] = 0.9
elif size == "s36":
UpperCAmelCase_ : str = [6, 6, 18, 6]
UpperCAmelCase_ : str = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : Union[str, Any] = 4.0
UpperCAmelCase_ : Any = 1E-6
UpperCAmelCase_ : Any = 0.9
elif size == "m36":
UpperCAmelCase_ : int = [6, 6, 18, 6]
UpperCAmelCase_ : Any = [96, 1_92, 3_84, 7_68]
UpperCAmelCase_ : Union[str, Any] = 4.0
UpperCAmelCase_ : int = 1E-6
UpperCAmelCase_ : Optional[Any] = 0.95
elif size == "m48":
UpperCAmelCase_ : str = [8, 8, 24, 8]
UpperCAmelCase_ : Optional[int] = [96, 1_92, 3_84, 7_68]
UpperCAmelCase_ : str = 4.0
UpperCAmelCase_ : List[str] = 1E-6
UpperCAmelCase_ : Optional[int] = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
UpperCAmelCase_ : Dict = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
# Prepare image
UpperCAmelCase_ : Optional[Any] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
UpperCAmelCase_ : Optional[int] = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) )
# rename keys
UpperCAmelCase_ : Tuple = rename_keys(_SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
UpperCAmelCase_ : Optional[int] = PoolFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
UpperCAmelCase_ : List[str] = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = outputs.logits
# define expected logit slices for different models
if size == "s12":
UpperCAmelCase_ : Any = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
UpperCAmelCase_ : str = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
UpperCAmelCase_ : Dict = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
UpperCAmelCase_ : Optional[Any] = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
UpperCAmelCase_ : Any = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_lowerCamelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
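# --- Illustrative CLI invocation (not part of the script); the script name and
# file paths are placeholders:
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf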
| 71 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""")
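# --- Illustrative note (not part of the original file): the H gate plus the
# CNOT chain prepares a GHZ state, so only the all-zero and all-one bitstrings
# are measured; for 3 qubits and 1000 shots the counts look roughly like
# {'000': ~500, '111': ~500}.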
| 205 | 0 |
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """simple docstring"""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
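# --- Illustrative checks (not in the original file): a positive power of two
# has exactly one set bit, so n & (n - 1) clears it to zero. Note the bit
# trick also returns True for 0.
assert is_power_of_two(1) is True  # 0b1
assert is_power_of_two(16) is True  # 0b10000
assert is_power_of_two(6) is False  # 0b110 & 0b101 == 0b100 != 0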
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    """simple docstring"""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
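# --- Illustrative check (not in the original file): the sieve above collects 2
# plus every odd prime below n, e.g. prime_sieve(20) -> [2, 3, 5, 7, 11, 13, 17, 19].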
def solution(limit: int = 999_966_663_333) -> int:
    """simple docstring"""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 564 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
a : str = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = """summarization"""
__SCREAMING_SNAKE_CASE = ["""loss"""]
__SCREAMING_SNAKE_CASE = ROUGE_KEYS
__SCREAMING_SNAKE_CASE = """rouge2"""
def __init__( self : Any , a_ : List[Any] , **a_ : Union[str, Any] ):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
__snake_case = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(_UpperCAmelCase , num_labels=_UpperCAmelCase , mode=self.mode , **_UpperCAmelCase )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
__snake_case = Path(self.output_dir ) / "metrics.json"
__snake_case = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
__snake_case = 0
__snake_case = defaultdict(_UpperCAmelCase )
__snake_case = self.config.model_type
__snake_case = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
__snake_case = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__snake_case = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
__snake_case = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__snake_case = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__snake_case = get_git_info()["repo_sha"]
__snake_case = hparams.num_workers
__snake_case = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _UpperCAmelCase ):
__snake_case = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__snake_case = self.decoder_start_token_id
__snake_case = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
__snake_case = False
__snake_case = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__snake_case = self.hparams.eval_max_gen_length
else:
__snake_case = self.model.config.max_length
__snake_case = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def A ( self : Dict , a_ : Tuple ):
"""simple docstring"""
__snake_case = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(_UpperCAmelCase , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
__snake_case = True
return readable_batch
def A ( self : Optional[Any] , a_ : Optional[Any] , **a_ : int ):
"""simple docstring"""
return self.model(_UpperCAmelCase , **_UpperCAmelCase )
def A ( self : Union[str, Any] , a_ : Any ):
"""simple docstring"""
__snake_case = self.tokenizer.batch_decode(
_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return lmap(str.strip , _UpperCAmelCase )
def A ( self : Tuple , a_ : str ):
"""simple docstring"""
__snake_case = self.tokenizer.pad_token_id
__snake_case , __snake_case = batch["input_ids"], batch["attention_mask"]
__snake_case = batch["labels"]
if isinstance(self.model , _UpperCAmelCase ):
__snake_case = self.model._shift_right(_UpperCAmelCase )
else:
__snake_case = shift_tokens_right(_UpperCAmelCase , _UpperCAmelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__snake_case = decoder_input_ids
self.save_readable_batch(_UpperCAmelCase )
__snake_case = self(_UpperCAmelCase , attention_mask=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__snake_case = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__snake_case = nn.CrossEntropyLoss(ignore_index=_UpperCAmelCase )
assert lm_logits.shape[-1] == self.vocab_size
__snake_case = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__snake_case = nn.functional.log_softmax(_UpperCAmelCase , dim=-1 )
__snake_case , __snake_case = label_smoothed_nll_loss(
_UpperCAmelCase , _UpperCAmelCase , self.hparams.label_smoothing , ignore_index=_UpperCAmelCase )
return (loss,)
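    # --- Illustrative note (not in the original file): with smoothing factor
    # epsilon, the fairseq-style label_smoothed_nll_loss used above combines the
    # target's negative log-likelihood with a uniform prior roughly as
    #   loss = (1 - epsilon) * nll_loss + (epsilon / vocab_size) * smooth_loss
    # where smooth_loss sums the log-probabilities over the whole vocabulary.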
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def A ( self : Optional[int] , a_ : str , a_ : Any ):
"""simple docstring"""
__snake_case = self._step(_UpperCAmelCase )
__snake_case = dict(zip(self.loss_names , _UpperCAmelCase ) )
# tokens per batch
__snake_case = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
__snake_case = batch["input_ids"].shape[0]
__snake_case = batch["input_ids"].eq(self.pad ).sum()
__snake_case = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def A ( self : List[str] , a_ : Optional[Any] , a_ : str ):
"""simple docstring"""
return self._generative_step(_UpperCAmelCase )
def A ( self : int , a_ : Tuple , a_ : List[Any]="val" ):
"""simple docstring"""
self.step_count += 1
__snake_case = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__snake_case = losses["loss"]
__snake_case = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
__snake_case = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__snake_case = torch.tensor(_UpperCAmelCase ).type_as(_UpperCAmelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_UpperCAmelCase )
__snake_case = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
__snake_case = self.step_count
self.metrics[prefix].append(_UpperCAmelCase ) # callback writes this to self.metrics_save_path
__snake_case = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def A ( self : Union[str, Any] , a_ : int , a_ : int ):
"""simple docstring"""
return calculate_rouge(_UpperCAmelCase , _UpperCAmelCase )
def A ( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__snake_case = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=_UpperCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__snake_case = (time.time() - ta) / batch["input_ids"].shape[0]
__snake_case = self.ids_to_clean_text(_UpperCAmelCase )
__snake_case = self.ids_to_clean_text(batch["labels"] )
__snake_case = self._step(_UpperCAmelCase )
__snake_case = dict(zip(self.loss_names , _UpperCAmelCase ) )
__snake_case = self.calc_generative_metrics(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.mean(lmap(_UpperCAmelCase , _UpperCAmelCase ) )
base_metrics.update(gen_time=_UpperCAmelCase , gen_len=_UpperCAmelCase , preds=_UpperCAmelCase , target=_UpperCAmelCase , **_UpperCAmelCase )
return base_metrics
def A ( self : List[str] , a_ : Optional[Any] , a_ : Any ):
"""simple docstring"""
return self._generative_step(_UpperCAmelCase )
def A ( self : Dict , a_ : str ):
"""simple docstring"""
return self.validation_epoch_end(_UpperCAmelCase , prefix="test" )
def A ( self : int , a_ : str ):
"""simple docstring"""
__snake_case = self.n_obs[type_path]
__snake_case = self.target_lens[type_path]
__snake_case = self.dataset_class(
self.tokenizer , type_path=_UpperCAmelCase , n_obs=_UpperCAmelCase , max_target_length=_UpperCAmelCase , **self.dataset_kwargs , )
return dataset
def A ( self : List[str] , a_ : Tuple , a_ : List[Any] , a_ : Tuple = False ):
"""simple docstring"""
__snake_case = self.get_dataset(_UpperCAmelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__snake_case = dataset.make_sortish_sampler(_UpperCAmelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCAmelCase , num_workers=self.num_workers , sampler=_UpperCAmelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__snake_case = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_UpperCAmelCase , batch_sampler=_UpperCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCAmelCase , num_workers=self.num_workers , sampler=_UpperCAmelCase , )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=_UpperCAmelCase )
return dataloader
def A ( self : Optional[int] ):
"""simple docstring"""
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def A ( self : int ):
"""simple docstring"""
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def A ( a_ : int , a_ : Optional[int] ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(_UpperCAmelCase , _UpperCAmelCase )
add_generic_args(_UpperCAmelCase , _UpperCAmelCase )
parser.add_argument(
"--max_source_length" , default=1_024 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=56 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=142 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=142 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=_UpperCAmelCase )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=_UpperCAmelCase )
parser.add_argument("--max_tokens_per_batch" , type=_UpperCAmelCase , default=_UpperCAmelCase )
parser.add_argument("--logger_name" , type=_UpperCAmelCase , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=_UpperCAmelCase , default=500 , required=_UpperCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=_UpperCAmelCase , default="summarization" , required=_UpperCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=_UpperCAmelCase , default=0.0 , required=_UpperCAmelCase )
parser.add_argument("--src_lang" , type=_UpperCAmelCase , default="" , required=_UpperCAmelCase )
parser.add_argument("--tgt_lang" , type=_UpperCAmelCase , default="" , required=_UpperCAmelCase )
parser.add_argument("--eval_beams" , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase )
parser.add_argument(
"--val_metric" , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=_UpperCAmelCase , default=1 , required=_UpperCAmelCase , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=_UpperCAmelCase , default=-1 , required=_UpperCAmelCase , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = """translation"""
__SCREAMING_SNAKE_CASE = ["""loss"""]
__SCREAMING_SNAKE_CASE = ["""bleu"""]
__SCREAMING_SNAKE_CASE = """bleu"""
def __init__( self : str , a_ : Tuple , **a_ : Union[str, Any] ):
"""simple docstring"""
super().__init__(_UpperCAmelCase , **_UpperCAmelCase )
__snake_case = hparams.src_lang
__snake_case = hparams.tgt_lang
def A ( self : Tuple , a_ : Union[str, Any] , a_ : int ):
"""simple docstring"""
return calculate_bleu(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple=None ) -> Dict:
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
__snake_case = SummarizationModule(__lowercase )
else:
__snake_case = TranslationModule(__lowercase )
__snake_case = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
__snake_case = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__snake_case = os.environ.get("WANDB_PROJECT" , __lowercase )
__snake_case = WandbLogger(name=model.output_dir.name , project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__snake_case = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
__snake_case = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__snake_case = False
__snake_case = args.val_metric == "loss"
__snake_case = generic_train(
__lowercase , __lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __lowercase ) , early_stopping_callback=__lowercase , logger=__lowercase , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
__snake_case = ""
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=__lowercase ) )
if checkpoints:
# the newest checkpoint is reused both as the weights to evaluate and as the trainer resume point
__snake_case = checkpoints[-1]
__snake_case = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser()
a : Dict = pl.Trainer.add_argparse_args(parser)
a : List[str] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
a : Optional[Any] = parser.parse_args()
main(args)
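# Note that --early_stopping_patience above is measured in validation checks, not
# epochs, so its effective horizon depends on val_check_interval. A sketch of the
# arithmetic (illustrative only):
def patience_in_epochs(patience_checks, val_check_interval):
    """Epochs spanned by `patience_checks` checks run every `val_check_interval` epoch fraction."""
    return patience_checks * val_check_interval

print(patience_in_epochs(4, 0.25))  # four quarter-epoch checks ~= one epoch of patience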
| 69 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _a ( unittest.TestCase ):
"""simple docstring"""
A_ = MODEL_FOR_MASKED_LM_MAPPING
A_ = TF_MODEL_FOR_MASKED_LM_MAPPING
def _UpperCAmelCase ( self ) -> List[str]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25506, 'token_str': ' accuser'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS'},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13606, 'token_str': ' Clara'},
] , )
UpperCamelCase_ = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
UpperCamelCase_ = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor gets cast back to float32
# for postprocessing.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(_UpperCAmelCase )
@slow
@require_tf
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(_UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] , )
UpperCamelCase_ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 12790,
'token_str': ' Lyon',
},
] , )
UpperCamelCase_ = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
@require_tf
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
UpperCamelCase_ = None
UpperCamelCase_ = None
self.run_pipeline_test(_UpperCAmelCase , [] )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = fill_masker.tokenizer
UpperCamelCase_ = fill_masker.model
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
with self.assertRaises(_UpperCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_UpperCAmelCase ):
fill_masker('This is' )
self.run_test_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_targets(_UpperCAmelCase , _UpperCAmelCase )
self.run_test_top_k_targets(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(_UpperCAmelCase , _UpperCAmelCase )
self.fill_mask_with_multiple_masks(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = tokenizer.get_vocab()
UpperCamelCase_ = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , targets=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Call argument
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , _UpperCAmelCase )
UpperCamelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(_UpperCAmelCase ) )
# Score equivalence
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['token_str'] for top_mask in outputs]
UpperCamelCase_ = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(tokens ) == set(targets ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=_UpperCAmelCase )
UpperCamelCase_ = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , top_k=2 )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = tokenizer.get_vocab()
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# top_k=2, ntargets=3
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=_UpperCAmelCase )
# If we use the most probable targets, and filter differently, we should still
# have the same results
UpperCamelCase_ = [el['token_str'] for el in sorted(_UpperCAmelCase , key=lambda x : x["score"] , reverse=_UpperCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(tokens ).issubset(targets ):
UpperCamelCase_ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=_UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_UpperCAmelCase ) , nested_simplify(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase_ = sorted(vocab.keys() )[:3]
UpperCamelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase_ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=_UpperCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# unique predictions than there are unique targets
self.assertEqual(len(_UpperCAmelCase ) , 3 )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = FillMaskPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
UpperCamelCase_ = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
_UpperCAmelCase , [
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
[
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
{'sequence': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase ), 'token': ANY(_UpperCAmelCase ), 'token_str': ANY(_UpperCAmelCase )},
],
] , )
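# How targets= narrows a fill-mask prediction, in miniature: softmax the logits at
# the masked position, then keep only the requested token ids. The real pipeline
# additionally tokenizes string targets and decodes sequences; this sketch assumes
# torch and hand-made logits.
import torch

def score_targets(logits_at_mask, target_ids):
    """logits_at_mask: 1-D tensor over the vocab; target_ids: candidate token ids."""
    probs = logits_at_mask.softmax(dim=-1)
    pairs = [(tid, probs[tid].item()) for tid in target_ids]
    return sorted(pairs, key=lambda p: p[1], reverse=True)

print(score_targets(torch.tensor([0.1, 2.0, 0.5, 1.5]), target_ids=[0, 2, 3]))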
| 23 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Tuple = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
snake_case_ : Union[str, Any] = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
snake_case_ : Union[str, Any] = {
'abeja/gpt-neox-japanese-2.7b': 2_0_4_8,
}
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : List[str] ):
'''simple docstring'''
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase: Optional[Any] = json.loads(f.read() )
UpperCAmelCase: List[Any] = collections.OrderedDict()
UpperCAmelCase: List[str] = collections.OrderedDict()
UpperCAmelCase: Any = collections.OrderedDict()
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase: Optional[Any] = f.readlines()
UpperCAmelCase: int = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(snake_case_ ):
UpperCAmelCase: Optional[int] = b
UpperCAmelCase: int = idx
for wd in b:
UpperCAmelCase: str = idx
return vocab, raw_vocab, ids_to_tokens, emoji
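# The loader above expects one vocabulary entry per line, where a line may carry
# several comma-separated surface forms sharing one id, and a bare "," line is the
# literal comma token. A self-contained illustration of that parsing rule:
lines = ["hello", "こんにちは,konnichiwa", ","]
token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in lines]
for idx, variants in enumerate(token):
    print(idx, variants)  # every surface form in `variants` maps to the same id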
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Dict = VOCAB_FILES_NAMES
lowerCamelCase__: List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ['''input_ids''', '''attention_mask''']
def __init__( self , __snake_case , __snake_case , __snake_case="<|endoftext|>" , __snake_case="<|endoftext|>" , __snake_case="<|startoftext|>" , __snake_case="<|endoftext|>" , __snake_case=False , **__snake_case , ) -> int:
"""simple docstring"""
super().__init__(
unk_token=__snake_case , pad_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , do_clean_text=__snake_case , **__snake_case , )
if not os.path.isfile(__snake_case ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
" model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(__snake_case ):
raise ValueError(
F'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
" pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase: Optional[int] = do_clean_text
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: str = load_vocab_and_emoji(__snake_case , __snake_case )
UpperCAmelCase: Optional[int] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def A__ ( self ) -> Tuple:
"""simple docstring"""
return len(self.raw_vocab )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
def A__ ( self , __snake_case ) -> int:
"""simple docstring"""
return self.subword_tokenizer.tokenize(__snake_case , clean=self.do_clean_text )
def A__ ( self , __snake_case ) -> Any:
"""simple docstring"""
return self.vocab.get(__snake_case , self.vocab.get(self.unk_token ) )
def A__ ( self , __snake_case ) -> Optional[int]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(__snake_case )
def A__ ( self , __snake_case ) -> Tuple:
"""simple docstring"""
UpperCAmelCase: Any = "".join(__snake_case ).strip()
return out_string
def A__ ( self , __snake_case ) -> List[int]:
"""simple docstring"""
UpperCAmelCase: Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__snake_case , add_special_tokens=__snake_case ) + [self.eos_token_id] )
if len(__snake_case ) > self.model_max_length:
UpperCAmelCase: List[Any] = input_ids[-self.model_max_length :]
return input_ids
def A__ ( self , __snake_case , __snake_case = None ) -> Tuple[str]:
"""simple docstring"""
UpperCAmelCase: Tuple = 0
if os.path.isdir(__snake_case ):
UpperCAmelCase: Optional[Any] = os.path.join(
__snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase: Union[str, Any] = os.path.join(
__snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase: Optional[int] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase: Any = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__snake_case , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase: Any = token_index
writer.write(",".join(__snake_case ) + "\n" )
index += 1
with open(__snake_case , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , __snake_case )
return vocab_file, emoji_file
class __lowerCamelCase ( lowercase ):
def __init__( self , __snake_case , __snake_case , __snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase: List[Any] = vocab # same as swe
UpperCAmelCase: Optional[Any] = ids_to_tokens # same as bpe
UpperCAmelCase: Union[str, Any] = emoji
UpperCAmelCase: Dict = np.max([len(__snake_case ) for w in self.vocab.keys()] )
UpperCAmelCase: Any = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
UpperCAmelCase: Tuple = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
UpperCAmelCase: List[Any] = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
UpperCAmelCase: List[Any] = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase: Any = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase: List[str] = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
UpperCAmelCase: str = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
UpperCAmelCase: Any = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
UpperCAmelCase: str = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> List[Any]:
"""simple docstring"""
return len(self.ids_to_tokens )
def A__ ( self , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase: Optional[Any] = self.content_repattera.sub("<URL>" , __snake_case )
UpperCAmelCase: str = self.content_repattera.sub("<EMAIL>" , __snake_case )
UpperCAmelCase: List[Any] = self.content_repattera.sub("<TEL>" , __snake_case )
UpperCAmelCase: Optional[int] = self.content_repattera.sub("<DATE>" , __snake_case )
UpperCAmelCase: str = self.content_repattera.sub("<DATE>" , __snake_case )
UpperCAmelCase: List[str] = self.content_repattera.sub("<PRICE>" , __snake_case )
UpperCAmelCase: Tuple = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase: Union[str, Any] = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def A__ ( self , __snake_case , __snake_case=False ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: List[Any] = text.replace(" " , "<SP>" )
UpperCAmelCase: Tuple = text.replace(" " , "<SP>" )
UpperCAmelCase: Any = text.replace("\r\n" , "<BR>" )
UpperCAmelCase: Union[str, Any] = text.replace("\n" , "<BR>" )
UpperCAmelCase: Optional[int] = text.replace("\r" , "<BR>" )
UpperCAmelCase: int = text.replace("\t" , "<TAB>" )
UpperCAmelCase: Optional[Any] = text.replace("—" , "ー" )
UpperCAmelCase: Any = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase: Dict = text.replace(__snake_case , __snake_case )
if clean:
UpperCAmelCase: Optional[Any] = self.clean_text(__snake_case )
def check_simbol(__snake_case ):
UpperCAmelCase: str = x.encode()
if len(x ) == 1 and len(e ) == 2: # a single character that encodes to two UTF-8 bytes
UpperCAmelCase: str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2A1 and c <= 0xC2BF)
or (c >= 0xC780 and c <= 0xC783)
or (c >= 0xCAB9 and c <= 0xCBBF)
or (c >= 0xCC80 and c <= 0xCDA2)
):
return True
return False
def checkuae(__snake_case ):
UpperCAmelCase: int = x.encode()
if len(x ) == 1 and len(e ) == 3: # a single character that encodes to three UTF-8 bytes
UpperCAmelCase: Dict = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE2_8080 and c <= 0xE2_B07F:
return True
return False
UpperCAmelCase: int = 0
UpperCAmelCase: List[Any] = []
while pos < len(__snake_case ):
UpperCAmelCase: List[str] = min(len(__snake_case ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase: Tuple = [] # (token_id, token, pos)
for e in range(__snake_case , __snake_case , -1 ):
UpperCAmelCase: Any = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__snake_case ) > 2:
UpperCAmelCase: Optional[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__snake_case ) > 0:
# the smallest token_id is adopted
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: Tuple = sorted(__snake_case , key=lambda x : x[0] )[0]
result.append(__snake_case )
UpperCAmelCase: Dict = e
else:
UpperCAmelCase: Union[str, Any] = pos + 1
UpperCAmelCase: int = text[pos:end]
if check_simbol(__snake_case ):
result.append("<KIGOU>" )
elif checkuae(__snake_case ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase: List[Any] = end
return result
def A__ ( self , __snake_case , __snake_case="\n" ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase: int = []
UpperCAmelCase: int = []
UpperCAmelCase: str = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__snake_case ) > 0:
words.append(bytearray(__snake_case ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase: Optional[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(__snake_case )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(__snake_case )
if len(__snake_case ) > 0:
words.append(bytearray(__snake_case ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase: Tuple = "".join(__snake_case )
return text
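# The <|byte N|> branches above implement byte fallback: characters with no
# vocabulary entry become one token per UTF-8 byte, and decoding reassembles
# consecutive byte tokens via bytearray(...).decode. A standalone round trip:
def to_byte_tokens(text):
    return ["<|byte%d|>" % b for b in text.encode("utf-8")]

def from_byte_tokens(tokens):
    data = bytearray(int(t[6:-2]) for t in tokens)
    return data.decode("utf-8", errors="replace")

tokens = to_byte_tokens("猫")
print(tokens)                    # three byte tokens for one CJK character
print(from_byte_tokens(tokens))  # back to "猫"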
| 166 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case_ : Optional[int] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
snake_case_ : Optional[int] = logging.get_logger(__name__)
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Union[str, Any] = '''maskformer'''
lowerCamelCase__: Optional[int] = {'''hidden_size''': '''mask_feature_size'''}
lowerCamelCase__: Optional[int] = ['''resnet''', '''swin''']
lowerCamelCase__: Optional[int] = ['''detr''']
def __init__( self , __snake_case = 2_5_6 , __snake_case = 2_5_6 , __snake_case = 0.1 , __snake_case = False , __snake_case = None , __snake_case = None , __snake_case = 0.02 , __snake_case = 1.0 , __snake_case = 1.0 , __snake_case = 1.0 , __snake_case = 20.0 , __snake_case = None , **__snake_case , ) -> List[Any]:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase: str = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase: Union[str, Any] = backbone_config.pop("model_type" )
UpperCAmelCase: List[str] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase: Any = config_class.from_dict(__snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
F'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase: Tuple = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase: Dict = (
decoder_config.pop("model_type" ) if isinstance(__snake_case , __snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F'Transformer Decoder {decoder_type} not supported, please use one of'
F' {",".join(self.decoders_supported )}' )
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase: Union[str, Any] = CONFIG_MAPPING[decoder_type]
UpperCAmelCase: Optional[Any] = config_class.from_dict(__snake_case )
UpperCAmelCase: Any = backbone_config
UpperCAmelCase: Dict = decoder_config
# main feature dimension for the model
UpperCAmelCase: Optional[int] = fpn_feature_size
UpperCAmelCase: Union[str, Any] = mask_feature_size
# initializer
UpperCAmelCase: Tuple = init_std
UpperCAmelCase: Union[str, Any] = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase: Optional[int] = cross_entropy_weight
UpperCAmelCase: List[str] = dice_weight
UpperCAmelCase: List[Any] = mask_weight
UpperCAmelCase: List[str] = use_auxiliary_loss
UpperCAmelCase: List[str] = no_object_weight
UpperCAmelCase: int = output_auxiliary_logits
UpperCAmelCase: int = self.decoder_config.encoder_attention_heads
UpperCAmelCase: Dict = self.decoder_config.num_hidden_layers
super().__init__(**__snake_case )
@classmethod
def A__ ( cls , __snake_case , __snake_case , **__snake_case ) -> Tuple:
"""simple docstring"""
return cls(
backbone_config=__snake_case , decoder_config=__snake_case , **__snake_case , )
def A__ ( self ) -> Dict[str, any]:
"""simple docstring"""
UpperCAmelCase: Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase: Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase: List[Any] = self.decoder_config.to_dict()
UpperCAmelCase: Dict = self.__class__.model_type
return output
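# A hedged sketch of building the config above from explicit sub-configs via the
# from_backbone_and_decoder_configs classmethod defined just before; exact keyword
# support depends on the installed transformers version.
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

backbone = SwinConfig(image_size=384, embed_dim=128, out_features=["stage1", "stage2", "stage3", "stage4"])
decoder = DetrConfig()
config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone_config=backbone, decoder_config=decoder)
print(config.decoder_config.model_type)  # "detr"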
| 166 | 1 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCAmelCase_ : Union[str, Any] = imread(R'digital_image_processing/image_data/lena_small.jpg')
lowerCAmelCase_ : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ) -> Any:
_a = cn.convert_to_negative(lowercase )
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ) -> Optional[int]:
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ) -> str:
_a = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ) -> Optional[Any]:
_a = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_a = canny.canny(lowercase )
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ) -> Union[str, Any]:
assert gg.gaussian_filter(lowercase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ) -> int:
# laplace diagonals
_a = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_a = conv.img_convolve(lowercase , lowercase ).astype(lowercase )
assert res.any()
def _lowerCamelCase ( ) -> Union[str, Any]:
assert med.median_filter(lowercase , 3 ).any()
def _lowerCamelCase ( ) -> Union[str, Any]:
_a , _a = sob.sobel_filter(lowercase )
assert grad.any() and theta.any()
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = sp.make_sepia(lowercase , 20 )
assert sepia.all()
def _lowerCamelCase ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> Union[str, Any]:
_a = bs.Burkes(imread(lowercase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( lowercase : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> Optional[Any]:
_a = rs.NearestNeighbour(imread(lowercase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ) -> Optional[int]:
_a = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
_a = imread(lowercase , 0 )
# Test for get_neighbors_pixel function() return not None
_a = 0
_a = 0
_a = image[x_coordinate][y_coordinate]
_a = lbp.get_neighbors_pixel(
lowercase , lowercase , lowercase , lowercase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_a = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_a = lbp.local_binary_value(lowercase , lowercase , lowercase )
assert lbp_image.any()
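# Local binary pattern in one function, mirroring what lbp.local_binary_value does
# per pixel: threshold the 8 neighbours against the centre and pack the bits. The
# neighbour order/weights here are one common convention, not necessarily the
# module's exact one.
import numpy as np

def lbp_value(img, x, y):
    center = img[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    code = 0
    for bit, (dx, dy) in enumerate(offsets):
        if img[x + dx, y + dy] >= center:
            code |= 1 << bit
    return code

img = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
print(lbp_value(img, 1, 1))  # 120: bits set only where neighbours >= 50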
| 692 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__a : int , **__a : Optional[Any] ):
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
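# The shim above is the usual deprecation pattern: subclass the replacement, warn
# on construction, inherit everything else. A generic, self-contained sketch
# (class names here are invented for illustration):
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor(size=512)  # still works, but emits a FutureWarning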
| 692 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : List[Any] = {"vocab_file": "spiece.model"}
snake_case_ : List[str] = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
snake_case_ : Dict = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
snake_case_ : Any = "▁"
class __snake_case ( a ):
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , _snake_case : Tuple , _snake_case : Any="</s>" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="<pad>" , _snake_case : Optional[Any]=100 , _snake_case : str=None , _snake_case : Optional[Dict[str, Any]] = None , _snake_case : List[str]=True , **_snake_case : Tuple , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ = [F"""<extra_id_{i}>""" for i in range(_snake_case)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase_ = len(set(filter(lambda _snake_case: bool('''extra_id''' in str(_snake_case)) , _snake_case)))
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''')
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''')
UpperCAmelCase_ = legacy
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , extra_ids=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy=_snake_case , **_snake_case , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = extra_ids
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_snake_case)
@staticmethod
def lowerCamelCase ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Optional[int]):
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCAmelCase_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _snake_case , )
return max_model_length
@property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = {self.convert_ids_to_tokens(_snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def lowerCamelCase ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_snake_case)) + [1]
return ([0] * len(_snake_case)) + [1] + ([0] * len(_snake_case)) + [1]
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return list(
set(filter(lambda _snake_case: bool(re.search(r'''<extra_id_\d+>''' , _snake_case)) is not None , self.additional_special_tokens)))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
return [self._convert_token_to_id(_snake_case) for token in self.get_sentinel_tokens()]
def lowerCamelCase ( self : str , _snake_case : List[int]):
"""simple docstring"""
if len(_snake_case) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
''' eos tokens being added.''')
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase ( self : List[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
UpperCAmelCase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def lowerCamelCase ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
UpperCAmelCase_ = self._add_eos_if_not_present(_snake_case)
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase_ = self._add_eos_if_not_present(_snake_case)
return token_ids_a + token_ids_a
def __getstate__( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self : Optional[Any] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCamelCase ( self : List[Any] , _snake_case : "TextInput" , **_snake_case : Any):
"""simple docstring"""
if not self.legacy:
UpperCAmelCase_ = SPIECE_UNDERLINE + text.replace(_snake_case , ''' ''')
return super().tokenize(_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : int , **_snake_case : Dict):
"""simple docstring"""
if not self.legacy:
UpperCAmelCase_ = text.startswith(_snake_case)
if is_first:
UpperCAmelCase_ = text[1:]
UpperCAmelCase_ = self.sp_model.encode(_snake_case , out_type=_snake_case)
if not self.legacy and not is_first and not text.startswith(''' ''') and tokens[0].startswith(_snake_case):
UpperCAmelCase_ = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
return tokens
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
if token.startswith('''<extra_id_'''):
UpperCAmelCase_ = re.match(r'''<extra_id_(\d+)>''' , _snake_case)
UpperCAmelCase_ = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_snake_case)
def lowerCamelCase ( self : str , _snake_case : Tuple):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
UpperCAmelCase_ = self.sp_model.IdToPiece(_snake_case)
else:
UpperCAmelCase_ = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def lowerCamelCase ( self : str , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_snake_case) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(_snake_case)
UpperCAmelCase_ = False
out_string += self.sp_model.decode(_snake_case)
return out_string.strip()
def lowerCamelCase ( self : int , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(_snake_case):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
UpperCAmelCase_ = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _snake_case)
elif not os.path.isfile(self.vocab_file):
with open(_snake_case , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_snake_case)
return (out_vocab_file,)
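# Sentinel bookkeeping as in the convert methods above: <extra_id_N> tokens occupy
# the top of the id space, so <extra_id_0> gets the highest id. Pure-arithmetic
# sketch; 32_100 is the usual T5 size (32_000 pieces + 100 extra ids).
def sentinel_to_id(num, vocab_size):
    return vocab_size - num - 1

def id_to_sentinel(index, vocab_size):
    return "<extra_id_%d>" % (vocab_size - 1 - index)

print(sentinel_to_id(0, 32_100))        # 32099
print(id_to_sentinel(32_099, 32_100))   # <extra_id_0>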
| 169 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def A (__A : int ) -> bool:
"""simple docstring"""
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
def A (__A : int , __A : int , __A : int , __A : int , __A : int , __A : int ) -> tuple[int, int]:
"""simple docstring"""
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(__A , __A )
top //= hcf
bottom //= hcf
return top, bottom
def A (__A : int = 35 ) -> int:
"""simple docstring"""
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(__A , __A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
__A , __A , __A , __A , __A , __A )
unique_s.add(__A )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(__A ) and is_sq(__A ):
UpperCAmelCase_ = int(sqrt(__A ) )
UpperCAmelCase_ = int(sqrt(__A ) )
UpperCAmelCase_ = gcd(__A , __A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
__A , __A , __A , __A , __A , __A )
unique_s.add(__A )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(__A , __A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
__A , __A , __A , __A , __A , __A )
unique_s.add(__A )
# n=2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__A ) and is_sq(__A ):
UpperCAmelCase_ = int(sqrt(__A ) )
UpperCAmelCase_ = int(sqrt(__A ) )
UpperCAmelCase_ = gcd(__A , __A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
__A , __A , __A , __A , __A , __A )
unique_s.add(__A )
for num, den in unique_s:
total += Fraction(__A , __A )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"{solution() = }")
| 169 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_A : Dict = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = BartphoTokenizer
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Dict = True
def lowercase_ ( self ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE__ = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
SCREAMING_SNAKE_CASE__ = dict(zip(A_ , range(len(A_ ) ) ) )
SCREAMING_SNAKE_CASE__ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
SCREAMING_SNAKE_CASE__ = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **A_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **A_ )
def lowercase_ ( self , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''This is a là test'''
SCREAMING_SNAKE_CASE__ = '''This is a<unk><unk> test'''
return input_text, output_text
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = BartphoTokenizer(A_ , self.monolingual_vocab_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ = '''This is a là test'''
SCREAMING_SNAKE_CASE__ = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
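# The expected ids above follow from BartPho reserving the low ids for special
# tokens (here assumed <s>=0, <pad>=1, </s>=2, <unk>=3), which shifts the
# five-piece toy vocabulary to ids 4-8; anything else falls back to <unk>:
vocab = {"▁This": 4, "▁is": 5, "▁a": 6, "▁t": 7, "est": 8}
unk_id = 3

def tokens_to_ids(tokens):
    return [vocab.get(t, unk_id) for t in tokens]

print(tokens_to_ids("▁This ▁is ▁a ▁l à ▁t est".split()))  # [4, 5, 6, 3, 3, 7, 8]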
| 100 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__snake_case : Any = _symbol_database.Default()
__snake_case : Dict = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
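# Usage sketch (an illustration, not part of the generated file): the builder
# calls above inject a `ModelProto` message class into this module's globals,
# so a SentencePiece model file (the "spiece.model" path below is assumed to
# exist) can be parsed with it:
#
#     m = ModelProto()
#     with open("spiece.model", "rb") as f:
#         m.ParseFromString(f.read())
#     print(m.trainer_spec.model_type, m.trainer_spec.vocab_size)
#     print(m.pieces[0].piece, m.pieces[0].score)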
| 660 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
"""simple docstring"""
def __init__( self :Union[str, Any], snake_case :List[Any], snake_case :int=2, snake_case :str=True, snake_case :Any=False, snake_case :Optional[int]=10, snake_case :List[str]=3, snake_case :Optional[Any]=32 * 8, snake_case :int=32 * 8, snake_case :str=4, snake_case :Union[str, Any]=64, ):
"""simple docstring"""
_lowercase =parent
_lowercase =batch_size
_lowercase =is_training
_lowercase =use_auxiliary_loss
_lowercase =num_queries
_lowercase =num_channels
_lowercase =min_size
_lowercase =max_size
_lowercase =num_labels
_lowercase =hidden_dim
_lowercase =hidden_dim
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
_lowercase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
snake_case)
_lowercase =torch.ones([self.batch_size, self.min_size, self.max_size], device=snake_case)
_lowercase =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=snake_case) > 0.5
).float()
_lowercase =(torch.rand((self.batch_size, self.num_labels), device=snake_case) > 0.5).long()
_lowercase =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =MaskaFormerConfig(
hidden_size=self.hidden_dim, )
_lowercase =self.num_queries
_lowercase =self.num_labels
_lowercase =[1, 1, 1, 1]
_lowercase =self.num_channels
_lowercase =64
_lowercase =128
_lowercase =self.hidden_dim
_lowercase =self.hidden_dim
_lowercase =self.hidden_dim
return config
def UpperCamelCase__ ( self :int):
"""simple docstring"""
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =self.prepare_config_and_inputs()
_lowercase ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCamelCase__ ( self :Tuple, snake_case :Optional[int], snake_case :Any):
"""simple docstring"""
_lowercase =output.encoder_hidden_states
_lowercase =output.pixel_decoder_hidden_states
_lowercase =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case), len(config.backbone_config.depths))
self.parent.assertTrue(len(snake_case), len(config.backbone_config.depths))
self.parent.assertTrue(len(snake_case), config.decoder_layers)
def UpperCamelCase__ ( self :Tuple, snake_case :Union[str, Any], snake_case :List[Any], snake_case :Optional[int], snake_case :Tuple=False):
"""simple docstring"""
with torch.no_grad():
_lowercase =MaskaFormerModel(config=snake_case)
model.to(snake_case)
model.eval()
_lowercase =model(pixel_values=snake_case, pixel_mask=snake_case)
_lowercase =model(snake_case, output_hidden_states=snake_case)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(snake_case, snake_case)
def UpperCamelCase__ ( self :List[str], snake_case :Tuple, snake_case :Union[str, Any], snake_case :str, snake_case :str, snake_case :Optional[Any]):
"""simple docstring"""
_lowercase =MaskaFormerForUniversalSegmentation(config=snake_case)
model.to(snake_case)
model.eval()
def comm_check_on_output(snake_case :str):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
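# Worked example: with the tester defaults min_size = max_size = 32 * 8 = 256,
# batch_size = 2 and num_queries = 10, the expected mask-logits shape is
# (2, 10, 256 // 4, 256 // 4) = (2, 10, 64, 64).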
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
_lowercase =model(pixel_values=snake_case, pixel_mask=snake_case)
_lowercase =model(snake_case)
comm_check_on_output(snake_case)
_lowercase =model(
pixel_values=snake_case, pixel_mask=snake_case, mask_labels=snake_case, class_labels=snake_case)
comm_check_on_output(snake_case)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__lowerCAmelCase : List[Any] =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__lowerCAmelCase : str ={'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__lowerCAmelCase : Optional[int] =False
__lowerCAmelCase : Tuple =False
__lowerCAmelCase : Optional[Any] =False
__lowerCAmelCase : Optional[Any] =False
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =MaskaFormerModelTester(self)
_lowercase =ConfigTester(self, config_class=snake_case, has_text_modality=snake_case)
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case, **snake_case, output_hidden_states=snake_case)
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case)
@unittest.skip(reason='Mask2Former does not use inputs_embeds')
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method')
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former is not a generative model')
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
pass
@unittest.skip(reason='Mask2Former does not use token embeddings')
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase__ ( self :int):
"""simple docstring"""
pass
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(snake_case)
_lowercase =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase =[*signature.parameters.keys()]
_lowercase =['pixel_values']
self.assertListEqual(arg_names[:1], snake_case)
@slow
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_lowercase =MaskaFormerModel.from_pretrained(snake_case)
self.assertIsNotNone(snake_case)
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =(self.model_tester.min_size,) * 2
_lowercase ={
'pixel_values': torch.randn((2, 3, *size), device=snake_case),
'mask_labels': torch.randn((2, 10, *size), device=snake_case),
'class_labels': torch.zeros(2, 10, device=snake_case).long(),
}
_lowercase =self.model_tester.get_config()
_lowercase =MaskaFormerForUniversalSegmentation(snake_case).to(snake_case)
_lowercase =model(**snake_case)
self.assertTrue(outputs.loss is not None)
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case, **snake_case, output_hidden_states=snake_case)
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(snake_case).to(snake_case)
_lowercase =model(**snake_case, output_attentions=snake_case)
self.assertTrue(outputs.attentions is not None)
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
if not self.model_tester.is_training:
return
_lowercase =self.all_model_classes[1]
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs()
_lowercase =model_class(snake_case)
model.to(snake_case)
model.train()
_lowercase =model(snake_case, mask_labels=snake_case, class_labels=snake_case).loss
loss.backward()
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =self.all_model_classes[1]
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs()
_lowercase =True
_lowercase =True
_lowercase =model_class(snake_case).to(snake_case)
model.train()
_lowercase =model(snake_case, mask_labels=snake_case, class_labels=snake_case)
_lowercase =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowercase =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_lowercase =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowercase =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
_SCREAMING_SNAKE_CASE = 1e-4
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =MaskaFormerModel.from_pretrained(self.model_checkpoints).to(snake_case)
_lowercase =self.default_image_processor
_lowercase =prepare_img()
_lowercase =image_processor(snake_case, return_tensors='pt').to(snake_case)
_lowercase =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(snake_case, (1, 3, 384, 384))
with torch.no_grad():
_lowercase =model(**snake_case)
_lowercase =torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], snake_case, atol=snake_case))
_lowercase =torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], snake_case, atol=snake_case))
_lowercase =torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]]).to(snake_case)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], snake_case, atol=snake_case))
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
_lowercase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(snake_case).eval()
_lowercase =self.default_image_processor
_lowercase =prepare_img()
_lowercase =image_processor(snake_case, return_tensors='pt').to(snake_case)
_lowercase =inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(snake_case, (1, 3, 384, 384))
with torch.no_grad():
_lowercase =model(**snake_case)
# masks_queries_logits
_lowercase =outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
_lowercase =[
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_lowercase =torch.tensor(snake_case).to(snake_case)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], snake_case, atol=snake_case))
# class_queries_logits
_lowercase =outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
_lowercase =torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
]).to(snake_case)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], snake_case, atol=snake_case))
def UpperCamelCase__ ( self :int):
"""simple docstring"""
_lowercase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(snake_case).eval()
_lowercase =self.default_image_processor
_lowercase =image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)], return_tensors='pt', )
_lowercase =inputs['pixel_values'].to(snake_case)
_lowercase =[el.to(snake_case) for el in inputs['mask_labels']]
_lowercase =[el.to(snake_case) for el in inputs['class_labels']]
with torch.no_grad():
_lowercase =model(**snake_case)
self.assertTrue(outputs.loss is not None)
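# Usage sketch distilled from the integration tests above (assumptions: torch
# and the checkpoint are downloadable; the shapes follow the assertions above):
#
#     processor = MaskaFormerImageProcessor.from_pretrained(
#         "facebook/mask2former-swin-small-coco-instance")
#     model = MaskaFormerForUniversalSegmentation.from_pretrained(
#         "facebook/mask2former-swin-small-coco-instance").eval()
#     inputs = processor(prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # masks_queries_logits: (1, num_queries, H // 4, W // 4)
#     # class_queries_logits: (1, num_queries, num_labels + 1)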
| 557 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
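# Worked example: evaluate("Helxo Worlx", "Hello World") finds 9 matching
# positions, so it returns ("Helxo Worlx", 9.0).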
def crossover(parent_a: str, parent_b: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_a: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_b = population_score[random.randint(0, N_SELECTED)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new strings to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''')

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
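# A runnable version of the concurrent evaluation sketched in the comment inside
# `basic` above (assumption: a fixed pool of 4 workers stands in for the
# undefined NUM_WORKERS constant; a list preserves submission order, so the
# caller can still sort the scored population afterwards).
def evaluate_concurrently(population: list[str], target: str) -> list[tuple[str, float]]:
    import concurrent.futures

    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        # Score every candidate string in parallel threads.
        futures = [executor.submit(evaluate, item, target) for item in population]
        return [future.result() for future in futures]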
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
| 557 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
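# Usage sketch (assumption: executed inside the transformers source tree so the
# relative imports above resolve):
#
#     config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
#     print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#     onnx_config = ResNetOnnxConfig(config)
#     print(onnx_config.atol_for_validation)  # 0.001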
| 132 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0,
        position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True,
        enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910,
        shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
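# Usage sketch (assumption: executed where the relative imports above resolve):
#
#     config = RoCBertConfig()
#     # RoCBert keeps three embedding spaces; with concat_input=True the word,
#     # pronunciation (dim 768) and shape (dim 512) embeddings are combined.
#     print(config.pronunciation_vocab_size, config.shape_vocab_size)  # 910 24858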
| 132 | 1 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return the prime numbers up to num using the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting at p * p as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num)) | 22 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 1 |
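# Usage sketch distilled from the masked-LM integration test above (assumption:
# TensorFlow and the checkpoint are downloadable):
#
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#     print(logits.shape)  # (1, 6, 33): batch, sequence length, vocab size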