| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86-54.5k) | int64 (0-371) | string (lengths 87-49.2k) | int64 (0-349) | int64 (0-1) |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__A = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , ) -> Any:
output_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , use_external_data_format=__UpperCamelCase , enable_onnx_checker=__UpperCamelCase , opset_version=__UpperCamelCase , )
else:
export(
__UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , opset_version=__UpperCamelCase , )
@torch.no_grad()
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[Any]:
_lowerCAmelCase =torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
_lowerCAmelCase ="""cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
_lowerCAmelCase ="""cpu"""
_lowerCAmelCase =Path(__UpperCamelCase )
# VAE DECODER
_lowerCAmelCase =AutoencoderKL.from_pretrained(model_path + """/vae""" )
_lowerCAmelCase =vae_decoder.config.latent_channels
# forward only through the decoder part
_lowerCAmelCase =vae_decoder.decode
onnx_export(
__UpperCamelCase , model_args=(
torch.randn(1 , __UpperCamelCase , 25 , 25 ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__UpperCamelCase , )
del vae_decoder
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__A = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
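# Usage sketch for the exporter above (the script name and model id are illustrative
# assumptions, not part of this sample; the flags match the argparse definition):
#   python convert_vae_to_onnx.py --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14 --fp16
# The decoder is written to <output_path>/vae_decoder/model.onnx.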
| 367 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''llama'''
lowerCamelCase = ['''past_key_values''']
def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_key_value_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =initializer_range
_lowerCAmelCase =rms_norm_eps
_lowerCAmelCase =pretraining_tp
_lowerCAmelCase =use_cache
_lowerCAmelCase =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
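# Illustrative values (an assumption, not taken from this file): a config created with
# rope_scaling={"type": "linear", "factor": 2.0} passes the validation above, since the
# type is one of ["linear", "dynamic"] and the factor is a float strictly greater than 1.0.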
| 341 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__A = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__A = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__A = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
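# Behaviour sketch (assuming the standard transformers lazy-import mechanism): at runtime the
# module body only builds _import_structure; framework-specific classes such as
# Data2VecAudioModel are imported on first attribute access through the _LazyModule proxy.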
| 368 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
| 341 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
_lowerCAmelCase =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =AudioDiffusionPipeline(vqvae=__UpperCAmelCase , unet=self.dummy_unet , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase )
_lowerCAmelCase =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase =pipe(generator=__UpperCAmelCase , steps=4 )
_lowerCAmelCase =output.audios[0]
_lowerCAmelCase =output.images[0]
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase =pipe(generator=__UpperCAmelCase , steps=4 , return_dict=__UpperCAmelCase )
_lowerCAmelCase =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase =np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_lowerCAmelCase =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_lowerCAmelCase =DDIMScheduler()
_lowerCAmelCase =self.dummy_vqvae_and_unet
_lowerCAmelCase =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase )
_lowerCAmelCase =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
np.random.seed(0 )
_lowerCAmelCase =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase =pipe(raw_audio=__UpperCAmelCase , generator=__UpperCAmelCase , start_step=5 , steps=10 )
_lowerCAmelCase =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_lowerCAmelCase =self.dummy_unet_condition
_lowerCAmelCase =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__UpperCAmelCase , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase )
_lowerCAmelCase =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
np.random.seed(0 )
_lowerCAmelCase =torch.rand((1, 1, 10) )
_lowerCAmelCase =pipe(generator=__UpperCAmelCase , encoding=__UpperCAmelCase )
_lowerCAmelCase =output.images[0]
_lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =torch_device
_lowerCAmelCase =DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
_lowerCAmelCase =pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase =pipe(generator=__UpperCAmelCase )
_lowerCAmelCase =output.audios[0]
_lowerCAmelCase =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 369 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
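# Intent of the check above: the hidden-state slice computed with cached past_key_values must
# match the slice from the full-sequence forward pass, i.e. decoder caching is numerically lossless.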
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341 | 0 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> list:
_lowerCAmelCase =word.split()
def justify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
_lowerCAmelCase =max_width - width
_lowerCAmelCase =len(__UpperCamelCase )
if len(__UpperCamelCase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_lowerCAmelCase =words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_lowerCAmelCase =spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_lowerCAmelCase =(
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(__UpperCamelCase ):
num_spaces_between_words_list[i] += 1
_lowerCAmelCase =[]
for i in range(__UpperCamelCase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(__UpperCamelCase )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =0
for word in words:
if width + len(__UpperCamelCase ) + len(__UpperCamelCase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(__UpperCamelCase )
width += len(__UpperCamelCase )
else:
# justify the line and add it to result
answer.append(justify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
# reset new line and new width
_lowerCAmelCase , _lowerCAmelCase =[word], len(__UpperCamelCase )
_lowerCAmelCase =max_width - width - len(__UpperCamelCase )
answer.append(""" """.join(__UpperCamelCase ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
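# Worked example traced from the justification routine above (a sketch, not an added test):
# splitting "This is an example of text justification." with max_width=16 yields
# ['This    is    an', 'example  of text', 'justification.  '], each line exactly 16 characters wide.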
| 370 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = JukeboxTokenizer
lowerCamelCase = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _lowerCAmelCase ( self ) -> str:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 341 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = XGLMConfig
lowerCamelCase = {}
lowerCamelCase = '''gelu'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =ffn_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Dict:
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase ( self ) -> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =config_and_inputs
_lowerCAmelCase ={
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =TFXGLMModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def _lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
_lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =(
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase ="""left"""
# use different length sentences to test batching
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase )
_lowerCAmelCase =inputs["""input_ids"""]
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
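# Rationale sketch for the batched-generation test above: decoder-only models continue from the
# last position, so left padding keeps real tokens adjacent to the generation start and the padded
# batch is expected to reproduce the per-sentence (unpadded) outputs.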
| 371 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
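# Layout implied by the method above (inferred from the code, stated as a sketch): a single
# sequence is encoded as <s> A </s>, and a pair as <s> A </s></s> B </s>.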
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 341 | 0 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> str:
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
_lowerCAmelCase =""""""
while len(__UpperCamelCase ) % 3 != 0:
_lowerCAmelCase ="""0""" + bin_string
_lowerCAmelCase =[
bin_string[index : index + 3]
for index in range(len(__UpperCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
_lowerCAmelCase =0
for index, val in enumerate(__UpperCamelCase ):
oct_val += int(2 ** (2 - index) * int(__UpperCamelCase ) )
oct_string += str(__UpperCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
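# Worked example traced from the converter above (a sketch, not an added test case):
# "111100101" groups into ["111", "100", "101"] -> 7, 4, 5 -> "745" (0b111100101 == 0o745 == 485).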
| 350 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
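# Note on the shape assertion in this test: the x4 upscaler quadruples each spatial dimension,
# so the 64x64 low-res input yields a 256x256 output (expected_height_width = 64 * 4).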
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 341 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
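# enable_full_determinism() switches PyTorch / cuDNN over to deterministic kernels so that the
# hard-coded output slices asserted in the dummy-pipeline tests below stay reproducible.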
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components defined above
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components defined above
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
        # assemble the upscale pipeline from the dummy components defined above
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''cvt'''
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =num_channels
_lowerCAmelCase =patch_sizes
_lowerCAmelCase =patch_stride
_lowerCAmelCase =patch_padding
_lowerCAmelCase =embed_dim
_lowerCAmelCase =num_heads
_lowerCAmelCase =depth
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =attention_drop_rate
_lowerCAmelCase =drop_rate
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =qkv_bias
_lowerCAmelCase =cls_token
_lowerCAmelCase =qkv_projection_method
_lowerCAmelCase =kernel_qkv
_lowerCAmelCase =padding_kv
_lowerCAmelCase =stride_kv
_lowerCAmelCase =padding_q
_lowerCAmelCase =stride_q
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
| 341 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 352 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''CLIPImageProcessor'''
lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 341 | 0 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> str:
_lowerCAmelCase =int(__UpperCamelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =divmod(__UpperCamelCase , 2 )
return binary_recursive(__UpperCamelCase ) + str(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> str:
_lowerCAmelCase =str(__UpperCamelCase ).strip()
if not number:
raise ValueError("""No input value was provided""" )
_lowerCAmelCase ="""-""" if number.startswith("""-""" ) else """"""
_lowerCAmelCase =number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return F'''{negative}0b{binary_recursive(int(__UpperCamelCase ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
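# Worked example (helper names are obfuscated above): for the input "-11" the wrapper strips
# the leading '-', the recursion expands 11 into the bit string "1011" via repeated divmod
# by 2, and the final return value is "-0b1011".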
| 353 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['PerceiverFeatureExtractor']
__A = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
import qiskit
def _lowerCamelCase(__UpperCamelCase = 2 ) -> qiskit.result.counts.Counts:
_lowerCAmelCase =qubits
# Using Aer's simulator
_lowerCAmelCase =qiskit.Aer.get_backend("""aer_simulator""" )
# Creating a Quantum Circuit acting on the q register
_lowerCAmelCase =qiskit.QuantumCircuit(__UpperCamelCase , __UpperCamelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , __UpperCamelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , __UpperCamelCase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__UpperCamelCase ) ) , list(range(__UpperCamelCase ) ) )
    # Measuring any one qubit now collapses the shared superposition, so the remaining
    # qubits are found in the same state as the measured one.
# Executing the circuit on the simulator
_lowerCAmelCase =qiskit.execute(__UpperCamelCase , __UpperCamelCase , shots=1000 )
return job.result().get_counts(__UpperCamelCase )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
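# With this GHZ-style circuit (Hadamard on qubit 0, then chained CNOTs), the 1000 shots
# should land only on the all-zeros and all-ones bitstrings, e.g. roughly
# {'000': ~500, '111': ~500} for the 3-qubit call above.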
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=1024 , __UpperCamelCase=1024 , __UpperCamelCase=False , **__UpperCamelCase ) -> int:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__UpperCamelCase )
_lowerCAmelCase =SeqaSeqDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , type_path="""train""" , **__UpperCamelCase )
_lowerCAmelCase =tok.pad_token_id
def get_lens(__UpperCamelCase ):
_lowerCAmelCase =tqdm(
DataLoader(__UpperCamelCase , batch_size=512 , num_workers=8 , shuffle=__UpperCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_lowerCAmelCase =[]
for batch in dl:
_lowerCAmelCase =batch["""input_ids"""].ne(__UpperCamelCase ).sum(1 ).tolist()
_lowerCAmelCase =batch["""labels"""].ne(__UpperCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__UpperCamelCase , __UpperCamelCase ):
max_lens.append(max(__UpperCamelCase , __UpperCamelCase ) )
else:
max_lens.extend(__UpperCamelCase )
return max_lens
_lowerCAmelCase =get_lens(__UpperCamelCase )
_lowerCAmelCase =SeqaSeqDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , type_path="""val""" , **__UpperCamelCase )
_lowerCAmelCase =get_lens(__UpperCamelCase )
pickle_save(__UpperCamelCase , train_ds.len_file )
pickle_save(__UpperCamelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
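# Note: the pickled length files written above are read back by the example's Seq2Seq
# samplers (length-sorted / dynamic batching), which is presumably why only the per-sample
# max(source, target) length is stored when consider_target is set.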
| 355 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple:
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" )
_lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" )
_lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" )
_lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" )
_lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item
_lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" )
_lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" )
_lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
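# The helper below (referred to as assign_to_checkpoint at its call sites) copies tensors from
# the original LDM state dict into the renamed diffusers layout, optionally splitting fused
# qkv attention weights into separate query / key / value projections first.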
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowerCAmelCase =old_checkpoint[path]
_lowerCAmelCase =old_tensor.shape[0] // 3
_lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3
_lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 )
_lowerCAmelCase =query.reshape(__UpperCamelCase )
_lowerCAmelCase =key.reshape(__UpperCamelCase )
_lowerCAmelCase =value.reshape(__UpperCamelCase )
for path in paths:
_lowerCAmelCase =path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0]
else:
_lowerCAmelCase =old_checkpoint[path["""old"""]]
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase ={}
_lowerCAmelCase =checkpoint["""time_embed.0.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.0.bias"""]
_lowerCAmelCase =checkpoint["""time_embed.2.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.2.bias"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""]
_lowerCAmelCase =checkpoint["""out.0.weight"""]
_lowerCAmelCase =checkpoint["""out.0.bias"""]
_lowerCAmelCase =checkpoint["""out.2.weight"""]
_lowerCAmelCase =checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
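    # Remap every original input_blocks.{i} entry onto the matching diffusers
    # down_blocks.{block_id} resnet / attention / downsampler parameters.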
for i in range(1 , __UpperCamelCase ):
_lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
_lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
_lowerCAmelCase =middle_blocks[0]
_lowerCAmelCase =middle_blocks[1]
_lowerCAmelCase =middle_blocks[2]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
_lowerCAmelCase =i // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =i % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
_lowerCAmelCase ={}
for layer in output_block_layers:
_lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
_lowerCAmelCase =[layer_name]
if len(__UpperCamelCase ) > 1:
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
_lowerCAmelCase =[]
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
_lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
_lowerCAmelCase =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__A = parser.parse_args()
__A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A = json.loads(f.read())
__A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__A = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 341 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
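        # Net effect: only SentencePiece pieces listed in dict.txt (plus the special tokens
        # registered above) receive ids; anything else maps to <unk> at encode time.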
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 356 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
if len(__UpperCamelCase ) <= 1:
return arr, 0
_lowerCAmelCase =len(__UpperCamelCase ) // 2
_lowerCAmelCase =arr[0:mid]
_lowerCAmelCase =arr[mid:]
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
_lowerCAmelCase =[]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for every k with i <= k < len(P),
            # because P is sorted. Each of those len(P) - i pairs is an inversion,
            # so they are all added to the count at once.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
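# Complexity note: the brute-force counter is O(n^2), while the recursive version reuses the
# merge step above to count cross inversions and runs in O(n log n) overall.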
def _lowerCamelCase() -> str:
_lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
# an empty list should also have zero inversions
_lowerCAmelCase =[]
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
if __name__ == "__main__":
main()
| 341 | 0 |
"""simple docstring"""
from manim import *
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""CPU""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(4 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""GPU""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""Model""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[]
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCAmelCase =Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
cpu_targs.append(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , aligned_edge=__UpperCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCAmelCase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCAmelCase =MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ) , Write(__UpperCAmelCase ) )
self.play(Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i, rect in enumerate(__UpperCAmelCase ):
_lowerCAmelCase =fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
first_animations.append(GrowFromCenter(__UpperCAmelCase , run_time=1 ) )
_lowerCAmelCase =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(*__UpperCAmelCase )
self.wait()
| 357 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
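# This mirrors datasets' DownloadConfig.copy(): every field is deep-copied so callers can
# tweak download options without mutating a shared configuration instance.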
| 341 | 0 |
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
if height >= 1:
move_tower(height - 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
move_disk(__UpperCamelCase , __UpperCamelCase )
move_tower(height - 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str:
print("""moving disk from""" , __UpperCamelCase , """to""" , __UpperCamelCase )
def _lowerCamelCase() -> Optional[Any]:
_lowerCAmelCase =int(input("""Height of hanoi: """ ).strip() )
move_tower(__UpperCamelCase , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
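# With this strategy a tower of height n always takes 2**n - 1 disk moves (e.g. 7 moves for n=3).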
| 358 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowerCamelCase() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
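# Expected output of the demo above: 1, 1, 0, 1 (an OR gate yields 0 only when both inputs are 0).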
| 341 | 0 |
"""simple docstring"""
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''microsoft/speecht5_tts'''
lowerCamelCase = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
lowerCamelCase = '''text_reader'''
lowerCamelCase = SpeechTaProcessor
lowerCamelCase = SpeechTaForTextToSpeech
lowerCamelCase = SpeechTaHifiGan
lowerCamelCase = ['''text''']
lowerCamelCase = ['''audio''']
def _lowerCAmelCase ( self ) -> List[Any]:
if self.post_processor is None:
_lowerCAmelCase ="""microsoft/speecht5_hifigan"""
super().setup()
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> Tuple:
_lowerCAmelCase =self.pre_processor(text=__UpperCAmelCase , return_tensors="""pt""" , truncation=__UpperCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase =load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase =torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
with torch.no_grad():
return self.model.generate_speech(**__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
with torch.no_grad():
return self.post_processor(__UpperCAmelCase ).cpu().detach()
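# Rough usage sketch (class and method names are obfuscated above; in transformers this
# corresponds to the SpeechT5 text-to-speech tool): calling the instantiated tool with a
# string runs the inherited encode -> forward -> decode chain and returns a waveform tensor.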
| 359 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple:
_lowerCAmelCase =compute_bleu(
reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
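# A self-contained sketch of the two quantities that BLEU combines: clipped
# ("modified") n-gram precision and the brevity penalty. It does not use the
# `compute_bleu` implementation wrapped by the metric above; the helper names
# `modified_precision` and `brevity_penalty` are assumptions made for this
# illustration only.
import math
from collections import Counter


def modified_precision(prediction, references, n):
    # n-gram counts in the prediction, clipped by the maximum count observed in any reference
    pred_ngrams = Counter(tuple(prediction[i : i + n]) for i in range(len(prediction) - n + 1))
    max_ref_counts = Counter()
    for reference in references:
        ref_ngrams = Counter(tuple(reference[i : i + n]) for i in range(len(reference) - n + 1))
        for ngram, count in ref_ngrams.items():
            max_ref_counts[ngram] = max(max_ref_counts[ngram], count)
    clipped = sum(min(count, max_ref_counts[ngram]) for ngram, count in pred_ngrams.items())
    total = sum(pred_ngrams.values())
    return clipped / total if total else 0.0


def brevity_penalty(prediction_length, reference_length):
    # BLEU does not reward overly short candidates: BP = 1 if c > r, else exp(1 - r / c)
    if prediction_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / prediction_length)


if __name__ == "__main__":
    prediction = ["hello", "there", "general", "kenobi"]
    references = [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]]
    print(modified_precision(prediction, references, 1))  # 1.0, every unigram is covered by a reference
    print(brevity_penalty(len(prediction), 4))  # 1.0, the candidate is as long as the reference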
| 341 | 0 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase = 10**9 ) -> int:
_lowerCAmelCase =1
_lowerCAmelCase =2
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
_lowerCAmelCase =2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 360 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
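# Example invocation of the conversion script above. The file names below are
# placeholders rather than real checkpoints; point them at a local ControlNet
# checkpoint and the YAML config it was trained with.
#
#   python convert_controlnet_checkpoint.py \          # placeholder script name
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny-diffusers \
#       --to_safetensors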
| 341 | 0 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple:
_lowerCAmelCase =compute_bleu(
reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
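# The `_LazyModule` wiring above defers the heavy torch/torchaudio imports until a
# symbol is actually accessed. The sketch below illustrates the same idea with a
# plain module-level `__getattr__` (PEP 562) and `importlib`; it is an illustration
# only, not the `_LazyModule` implementation from `transformers.utils`, and it
# assumes it lives inside a package so the relative imports resolve.
import importlib

_LAZY_ATTRIBUTES = {
    "ASTConfig": "configuration_audio_spectrogram_transformer",
    "ASTModel": "modeling_audio_spectrogram_transformer",
    "ASTFeatureExtractor": "feature_extraction_audio_spectrogram_transformer",
}


def __getattr__(name):
    # Import the owning submodule lazily the first time the attribute is requested.
    if name in _LAZY_ATTRIBUTES:
        submodule = importlib.import_module("." + _LAZY_ATTRIBUTES[name], __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")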
| 341 | 0 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =data
_lowerCAmelCase =None
_lowerCAmelCase =None
def _lowerCamelCase() -> TreeNode:
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCAmelCase =input("""Enter the value of the root node: """ ).strip().lower()
_lowerCAmelCase =queue.Queue()
_lowerCAmelCase =TreeNode(int(__UpperCamelCase ) )
q.put(__UpperCamelCase )
while not q.empty():
_lowerCAmelCase =q.get()
_lowerCAmelCase =F'''Enter the left node of {node_found.data}: '''
_lowerCAmelCase =input(__UpperCamelCase ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase =TreeNode(int(__UpperCamelCase ) )
_lowerCAmelCase =left_node
q.put(__UpperCamelCase )
_lowerCAmelCase =F'''Enter the right node of {node_found.data}: '''
_lowerCAmelCase =input(__UpperCamelCase ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCAmelCase =TreeNode(int(__UpperCamelCase ) )
_lowerCAmelCase =right_node
q.put(__UpperCamelCase )
raise
def _lowerCamelCase(__UpperCamelCase ) -> None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def _lowerCamelCase(__UpperCamelCase ) -> None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def _lowerCamelCase(__UpperCamelCase ) -> None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def _lowerCamelCase(__UpperCamelCase ) -> None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node:
return
_lowerCAmelCase =queue.Queue()
q.put(__UpperCamelCase )
while not q.empty():
_lowerCAmelCase =q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _lowerCamelCase(__UpperCamelCase ) -> None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node:
return
_lowerCAmelCase =queue.Queue()
q.put(__UpperCamelCase )
while not q.empty():
_lowerCAmelCase =[]
while not q.empty():
_lowerCAmelCase =q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node:
return
_lowerCAmelCase =[]
_lowerCAmelCase =node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(__UpperCamelCase )
_lowerCAmelCase =n.left
# end of while means current node doesn't have left child
_lowerCAmelCase =stack.pop()
# start to traverse its right child
_lowerCAmelCase =n.right
def _lowerCamelCase(__UpperCamelCase ) -> None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node:
return
_lowerCAmelCase =[]
_lowerCAmelCase =node
while n or stack:
while n:
stack.append(__UpperCamelCase )
_lowerCAmelCase =n.left
_lowerCAmelCase =stack.pop()
print(n.data , end=""",""" )
_lowerCAmelCase =n.right
def _lowerCamelCase(__UpperCamelCase ) -> None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node:
return
_lowerCAmelCase , _lowerCAmelCase =[], []
_lowerCAmelCase =node
stacka.append(__UpperCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
_lowerCAmelCase =stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__UpperCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def _lowerCamelCase(__UpperCamelCase = "" , __UpperCamelCase=50 , __UpperCamelCase="*" ) -> str:
if not s:
return "\n" + width * char
_lowerCAmelCase , _lowerCAmelCase =divmod(width - len(__UpperCamelCase ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
__A = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
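# A non-interactive companion to the traversals above: build a small fixed tree
# directly and spell out the orders each traversal prints. The `TreeNode` name and
# the `data`/`left`/`right` attributes are the ones the traversal functions above
# rely on; `build_example_tree` itself is a name invented for this sketch.
def build_example_tree():
    #         1
    #       /   \
    #      2     3
    #     / \
    #    4   5
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.left.right = TreeNode(5)
    return root


# Expected output for the tree above:
#   pre-order   : 1, 2, 4, 5, 3
#   in-order    : 4, 2, 5, 1, 3
#   post-order  : 4, 5, 2, 3, 1
#   level-order : 1, 2, 3, 4, 5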
| 363 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict:
_lowerCAmelCase ={doc: key_lines}
_lowerCAmelCase ={doc: sys_lines}
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_lowerCAmelCase =(conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
_lowerCAmelCase =False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase =line.split()[5]
if not parse_col == "-":
_lowerCAmelCase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =[
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase =evaluate(
key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , )
return score
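# The `conll_score` reported by this metric is just the mean of the MUC, B-cubed
# and CEAFe F1 values scaled to a percentage (see the `conll_subparts_num == 3`
# branch in the evaluation helper above). A tiny numeric illustration with made-up
# F1 values:
if __name__ == "__main__":
    example_f1 = {"muc": 0.80, "bcub": 0.70, "ceafe": 0.60}
    conll_score = sum(example_f1.values()) / 3 * 100
    print(f"CoNLL score: {conll_score:.2f}")  # 70.00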
| 341 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = 0
lowerCamelCase = False
lowerCamelCase = 3.0
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Optional[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {"""a""": 2, """c""": 2.2_5} )
@require_cuda
def _lowerCAmelCase ( self ) -> int:
        # Non-default scaler kwargs should be forwarded to the underlying GradScaler.
_lowerCAmelCase =GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase =Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , __UpperCAmelCase )
@require_multi_gpu
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
__A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__A = Accelerator(kwargs_handlers=[ddp_scaler])
__A = torch.nn.Linear(100, 200)
__A = accelerator.prepare(model)
# Check the values changed in kwargs
__A = ''
__A = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
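# The same mechanism as the test above, in plain usage form: kwargs handlers are
# small dataclasses whose non-default fields are forwarded when `Accelerator`
# builds the corresponding PyTorch object (DDP wrapper, GradScaler, ...). This is
# an illustrative sketch that assumes a CUDA machine for the fp16 scaler part.
def kwargs_handler_usage_sketch():
    ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    scaler_kwargs = GradScalerKwargs(init_scale=1024)
    # Only fields that differ from their defaults are passed on.
    print(ddp_kwargs.to_kwargs())     # {'find_unused_parameters': True}
    print(scaler_kwargs.to_kwargs())  # {'init_scale': 1024}
    accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[ddp_kwargs, scaler_kwargs])
    return accelerator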
| 364 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = XGLMConfig
lowerCamelCase = {}
lowerCamelCase = '''gelu'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =ffn_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Dict:
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase ( self ) -> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =config_and_inputs
_lowerCAmelCase ={
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =TFXGLMModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def _lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
_lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =(
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase ="""left"""
# use different length sentences to test batching
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase )
_lowerCAmelCase =inputs["""input_ids"""]
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
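# Why the batching test above switches the tokenizer to left padding: XGLM is a
# decoder-only model, so generation continues from the final position of each row;
# with right padding that final position would be a pad token for the shorter
# sequences. A condensed, hedged sketch of the same setup (it downloads the public
# checkpoint, so it is illustrative rather than a fast unit test):
def left_padding_generation_sketch():
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    tokenizer.padding_side = "left"
    model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
    inputs = tokenizer(["A fairly long prompt about nothing in particular", "Hi"], return_tensors="tf", padding=True)
    outputs = model.generate(
        input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=8
    )
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)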
| 341 | 0 |
"""simple docstring"""
import socket
def _lowerCamelCase() -> Any:
_lowerCAmelCase =socket.socket(socket.AF_INET , socket.SOCK_STREAM )
_lowerCAmelCase =socket.gethostname()
_lowerCAmelCase =12312
sock.connect((host, port) )
sock.send(b"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
_lowerCAmelCase =sock.recv(1024 )
if not data:
break
out_file.write(__UpperCamelCase )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
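# A matching server sketch for the client above: it listens on the same port,
# accepts a single connection and streams a local file back, which the client then
# saves as "Received_file". The file name `file_to_send.txt` is a placeholder.
def serve_file(filename: str = "file_to_send.txt", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    connection, address = server.accept()
    print(f"Connection from {address}")
    print(connection.recv(1024))  # the client's greeting
    with open(filename, "rb") as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            connection.send(chunk)
    connection.close()
    server.close()
    print("File sent, connection closed")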
| 365 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__A = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
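# Unlike BERT-style tokenizers, XLNet appends its special tokens at the end of the
# sequence (see `build_inputs_with_special_tokens` above):
#   single sequence : A ... A <sep> <cls>
#   sequence pair   : A ... A <sep> B ... B <sep> <cls>
# and the trailing <cls> gets its own segment id (2). A small, hedged check using
# the public checkpoint (requires downloading the SentencePiece vocabulary):
def xlnet_special_tokens_sketch():
    from transformers import XLNetTokenizer

    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    ids = tokenizer.build_inputs_with_special_tokens(
        tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
    )
    return tokenizer.convert_ids_to_tokens(ids)  # ends with '<sep>', '<cls>'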
| 341 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _lowerCamelCase() -> None:
print("""Making key files...""" )
make_key_files("""rsa""" , 1024 )
print("""Key files generation successful.""" )
def _lowerCamelCase(__UpperCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
print("""Generating prime p...""" )
_lowerCAmelCase =rabinMiller.generate_large_prime(__UpperCamelCase )
print("""Generating prime q...""" )
_lowerCAmelCase =rabinMiller.generate_large_prime(__UpperCamelCase )
_lowerCAmelCase =p * q
print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
while True:
_lowerCAmelCase =random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(__UpperCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("""Calculating d that is mod inverse of e...""" )
_lowerCAmelCase =cryptoMath.find_mod_inverse(__UpperCamelCase , (p - 1) * (q - 1) )
_lowerCAmelCase =(n, e)
_lowerCAmelCase =(n, d)
return (public_key, private_key)
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> None:
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print("""\nWARNING:""" )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
_lowerCAmelCase , _lowerCAmelCase =generate_key(__UpperCamelCase )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' , """w""" ) as out_file:
out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' , """w""" ) as out_file:
out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
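# What the generated (n, e) / (n, d) pair is for: textbook RSA with Python's
# three-argument `pow`. The tiny primes below (p = 61, q = 53) and the lack of
# padding make this a demonstration of the key relationship only, not a secure
# construction; `textbook_rsa_demo` is a name invented for this sketch.
def textbook_rsa_demo() -> None:
    n, e, d = 3233, 17, 2753  # n = 61 * 53, and e * d ≡ 1 (mod (61 - 1) * (53 - 1))
    message = 65
    ciphertext = pow(message, e, n)  # encrypt with the public key: 2790
    recovered = pow(ciphertext, d, n)  # decrypt with the private key: 65
    assert recovered == message
    print(ciphertext, recovered)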
| 366 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase ) -> bool:
_lowerCAmelCase =str(__UpperCamelCase )
return n == n[::-1]
def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str:
_lowerCAmelCase =0
for i in range(1 , __UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
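# A quick illustration of the double-base palindrome test above: 585 reads the
# same forwards and backwards in base 10 and, as 0b1001001001, in base 2 as well,
# so it is one of the numbers the solution sums. `illustrate_585` is a name
# invented for this sketch.
def illustrate_585() -> None:
    print(str(585) == str(585)[::-1])  # True
    binary = bin(585).split("b")[1]    # '1001001001'
    print(binary == binary[::-1])      # True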
| 341 | 0 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__A = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
_lowerCAmelCase =list(s_dict.keys() )
for key in keys:
_lowerCAmelCase =R""".*/layers_(\d+)"""
_lowerCAmelCase =key
if re.match(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , __UpperCamelCase )
_lowerCAmelCase =R"""(encoder|decoder)\/"""
if re.match(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =re.match(__UpperCamelCase , __UpperCamelCase ).groups()
if groups[0] == "encoder":
_lowerCAmelCase =re.sub(R"""/mlp/""" , R"""/1/mlp/""" , __UpperCamelCase )
_lowerCAmelCase =re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , __UpperCamelCase )
elif groups[0] == "decoder":
_lowerCAmelCase =re.sub(R"""/mlp/""" , R"""/2/mlp/""" , __UpperCamelCase )
_lowerCAmelCase =re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , __UpperCamelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_lowerCAmelCase =new_key.replace(__UpperCamelCase , __UpperCamelCase )
print(F'''{key} -> {new_key}''' )
_lowerCAmelCase =s_dict.pop(__UpperCamelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_lowerCAmelCase =s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_lowerCAmelCase =s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_lowerCAmelCase =s_dict[key].shape[0]
_lowerCAmelCase =s_dict[key]
for idx in range(__UpperCamelCase ):
_lowerCAmelCase =expert_weihts[idx]
print(F'''{key} -> {key.replace('expert/' , 'nested fstring' )}''' )
s_dict.pop(__UpperCamelCase )
return s_dict
__A = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(__UpperCamelCase , """r""" ) as f:
_lowerCAmelCase =f.read()
_lowerCAmelCase =re.findall(R"""(.*) = ([0-9.]*)""" , __UpperCamelCase )
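    # every "PARAM = value" pair found in the gin file is mapped onto a SwitchTransformersConfig attribute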
_lowerCAmelCase ={}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_lowerCAmelCase =float(__UpperCamelCase ) if """.""" in value else int(__UpperCamelCase )
_lowerCAmelCase =re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , __UpperCamelCase )[0]
_lowerCAmelCase =str(activation[1] )
_lowerCAmelCase =num_experts
_lowerCAmelCase =SwitchTransformersConfig(**__UpperCamelCase )
return config
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="./" , __UpperCamelCase=8 ) -> Tuple:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_lowerCAmelCase =checkpoints.load_tax_checkpoint(__UpperCamelCase )
if gin_file is not None:
_lowerCAmelCase =convert_gin_to_config(__UpperCamelCase , __UpperCamelCase )
else:
_lowerCAmelCase =SwitchTransformersConfig.from_pretrained(__UpperCamelCase )
_lowerCAmelCase =SwitchTransformersForConditionalGeneration(__UpperCamelCase )
_lowerCAmelCase =flax_params["""target"""]
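    # flatten the nested Flax params, rename keys to the HF layout, then unflatten back into a tree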
_lowerCAmelCase =flatten_dict(__UpperCamelCase , sep="""/""" )
_lowerCAmelCase =rename_keys(__UpperCamelCase )
_lowerCAmelCase =unflatten_dict(__UpperCamelCase , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
            'Path to the T5X SwitchTransformers checkpoint to convert. \nIf no `gin_file` is provided, a'
            ' `config_name` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
__A = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 367 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''llama'''
lowerCamelCase = ['''past_key_values''']
def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_key_value_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =initializer_range
_lowerCAmelCase =rms_norm_eps
_lowerCAmelCase =pretraining_tp
_lowerCAmelCase =use_cache
_lowerCAmelCase =rope_scaling
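        # rope_scaling, when given, is expected to look like {"type": "linear" | "dynamic", "factor": float > 1}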
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 341 | 0 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict:
_lowerCAmelCase ={doc: key_lines}
_lowerCAmelCase ={doc: sys_lines}
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
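    # collect gold (key) and system (sys) mentions, optionally restricted to NPs or minimum spans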
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
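    # the CoNLL score is the average F1 of the MUC, B-cubed and CEAFe metrics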
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_lowerCAmelCase =(conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
_lowerCAmelCase =False
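    # column 6 of the CoNLL format is the parse bit; anything other than "-" means a gold parse is present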
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase =line.split()[5]
if not parse_col == "-":
_lowerCAmelCase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =[
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase =evaluate(
key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , )
return score
| 368 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
| 341 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> float:
return 0.0
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> tuple[int | float, int | float]:
_lowerCAmelCase =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
_lowerCAmelCase =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> None:
_lowerCAmelCase =512
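    # drive the filter with a unit impulse; the FFT of the impulse response gives the frequency response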
_lowerCAmelCase =[1] + [0] * (size - 1)
_lowerCAmelCase =[filter_type.process(__UpperCamelCase ) for item in inputs]
_lowerCAmelCase =[0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCAmelCase =np.abs(np.fft.fft(__UpperCamelCase ) )
_lowerCAmelCase =20 * np.logaa(__UpperCamelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
_lowerCAmelCase =get_bounds(__UpperCamelCase , __UpperCamelCase )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(__UpperCamelCase )
plt.show()
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> None:
_lowerCAmelCase =512
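    # same impulse-response trick, but plot the phase (angle of the FFT) instead of the magnitude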
_lowerCAmelCase =[1] + [0] * (size - 1)
_lowerCAmelCase =[filter_type.process(__UpperCamelCase ) for item in inputs]
_lowerCAmelCase =[0] * (samplerate - size) # zero-padding
outputs += filler
_lowerCAmelCase =np.angle(np.fft.fft(__UpperCamelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(__UpperCamelCase , -2 * pi ) )
plt.show()
| 369 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
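        # run the decoder with use_cache=True so it returns past_key_values that can be fed back in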
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new tokens to input_ids
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=False ) -> List[Any]:
_lowerCAmelCase =OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
_lowerCAmelCase ="""segformer.encoder.""" + key
if key.startswith("""backbone""" ):
_lowerCAmelCase =key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCAmelCase =key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
_lowerCAmelCase =key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(__UpperCamelCase )-1}''' )
if "norm" in key:
_lowerCAmelCase =key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCAmelCase =key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
_lowerCAmelCase =key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(__UpperCamelCase )-1}''' )
if "layer_norm1" in key:
_lowerCAmelCase =key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
_lowerCAmelCase =key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
_lowerCAmelCase =key[key.find("""block""" ) + len("""block""" )]
_lowerCAmelCase =key.replace(F'''block{idx}''' , F'''block.{int(__UpperCamelCase )-1}''' )
if "attn.q" in key:
_lowerCAmelCase =key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
_lowerCAmelCase =key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
_lowerCAmelCase =key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
_lowerCAmelCase =key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
_lowerCAmelCase =key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
_lowerCAmelCase =key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
_lowerCAmelCase =key.replace("""linear_fuse.conv""" , """linear_fuse""" )
_lowerCAmelCase =key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCAmelCase =key[key.find("""linear_c""" ) + len("""linear_c""" )]
_lowerCAmelCase =key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(__UpperCamelCase )-1}''' )
if key.startswith("""head""" ):
_lowerCAmelCase =key.replace("""head""" , """classifier""" )
_lowerCAmelCase =value
return new_state_dict
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCAmelCase =state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
_lowerCAmelCase =state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
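            # the first hidden_sizes[i] rows of kv_weight/kv_bias are the key projection, the remaining rows the value projection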
_lowerCAmelCase =kv_weight[
: config.hidden_sizes[i], :
]
_lowerCAmelCase =kv_bias[: config.hidden_sizes[i]]
_lowerCAmelCase =kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCAmelCase =kv_bias[
config.hidden_sizes[i] :
]
def _lowerCamelCase() -> List[Any]:
_lowerCAmelCase ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase =Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
_lowerCAmelCase =SegformerConfig()
_lowerCAmelCase =False
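    # default to the full semantic-segmentation model; "mit" (encoder-only) checkpoints flip this below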
# set attributes based on model_name
_lowerCAmelCase ="""huggingface/label-files"""
if "segformer" in model_name:
_lowerCAmelCase =model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
_lowerCAmelCase =150
_lowerCAmelCase ="""ade20k-id2label.json"""
_lowerCAmelCase =(1, 150, 128, 128)
elif "city" in model_name:
_lowerCAmelCase =19
_lowerCAmelCase ="""cityscapes-id2label.json"""
_lowerCAmelCase =(1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
_lowerCAmelCase =True
_lowerCAmelCase =model_name[4:6]
_lowerCAmelCase =1000
_lowerCAmelCase ="""imagenet-1k-id2label.json"""
_lowerCAmelCase =(1, 1000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
_lowerCAmelCase =json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase ={int(__UpperCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase =idalabel
_lowerCAmelCase ={v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_lowerCAmelCase =[64, 128, 320, 512]
_lowerCAmelCase =256
elif size == "b2":
_lowerCAmelCase =[64, 128, 320, 512]
_lowerCAmelCase =768
_lowerCAmelCase =[3, 4, 6, 3]
elif size == "b3":
_lowerCAmelCase =[64, 128, 320, 512]
_lowerCAmelCase =768
_lowerCAmelCase =[3, 4, 18, 3]
elif size == "b4":
_lowerCAmelCase =[64, 128, 320, 512]
_lowerCAmelCase =768
_lowerCAmelCase =[3, 8, 27, 3]
elif size == "b5":
_lowerCAmelCase =[64, 128, 320, 512]
_lowerCAmelCase =768
_lowerCAmelCase =[3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
_lowerCAmelCase =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__UpperCamelCase , align=__UpperCamelCase , do_random_crop=__UpperCamelCase )
# prepare image
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
_lowerCAmelCase =torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
else:
_lowerCAmelCase =torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
_lowerCAmelCase =rename_keys(__UpperCamelCase , encoder_only=__UpperCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__UpperCamelCase , __UpperCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
_lowerCAmelCase =False
_lowerCAmelCase =SegformerForImageClassification(__UpperCamelCase )
else:
_lowerCAmelCase =SegformerForSemanticSegmentation(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# forward pass
_lowerCAmelCase =model(__UpperCamelCase )
_lowerCAmelCase =outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_lowerCAmelCase =torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_lowerCAmelCase =torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_lowerCAmelCase =torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_lowerCAmelCase =torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_lowerCAmelCase =torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_lowerCAmelCase =torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_lowerCAmelCase =torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_lowerCAmelCase =torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_lowerCAmelCase =torch.tensor(
[
[
[-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1],
[-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1],
[-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1],
],
[
[-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1],
[-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1],
[-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1],
],
[
[7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2],
[4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1],
[3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_lowerCAmelCase =torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_lowerCAmelCase =torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_lowerCAmelCase =torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_lowerCAmelCase =torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_lowerCAmelCase =torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_lowerCAmelCase =torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
else:
_lowerCAmelCase =logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 370 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = JukeboxTokenizer
lowerCamelCase = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _lowerCAmelCase ( self ) -> str:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 341 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = MgpstrTokenizer
lowerCamelCase = False
lowerCamelCase = {}
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# fmt: off
_lowerCAmelCase =["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
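        # write the char-level vocab to a JSON file so the tokenizer under test can load it from tmpdirname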
_lowerCAmelCase =dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + """\n""" )
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Any:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase ="""tester"""
_lowerCAmelCase ="""tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def _lowerCAmelCase ( self ) -> str:
pass
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.get_tokenizers(do_lower_case=__UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_lowerCAmelCase ="""[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_lowerCAmelCase =tokenizer.encode([special_token] , add_special_tokens=__UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , 1 )
_lowerCAmelCase =tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_lowerCAmelCase , _lowerCAmelCase =self.get_input_output_texts(__UpperCAmelCase )
_lowerCAmelCase =tokenizer.tokenize(__UpperCAmelCase )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
_lowerCAmelCase =tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertNotEqual(len(__UpperCAmelCase ) , 0 )
_lowerCAmelCase =tokenizer.decode(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(text_a.replace(""" """ , """""" ) , __UpperCAmelCase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def _lowerCAmelCase ( self ) -> str:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def _lowerCAmelCase ( self ) -> List[str]:
pass
| 371 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
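        # then register every token listed in the monolingual vocab file (dict.txt), skipping duplicates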
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 341 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__A = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def _lowerCamelCase(__UpperCamelCase=True ) -> Optional[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
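# Each (dataset, config) pair above becomes one named test case that checks the prepared dataset's
# info file can be downloaded from the Hugging Face GCP mirror.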
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__magic_name__ ) )
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =dataset_module_factory(__UpperCAmelCase , cache_dir=__UpperCAmelCase )
_lowerCAmelCase =import_main_class(dataset_module.module_path , dataset=__UpperCAmelCase )
_lowerCAmelCase =builder_cls(
cache_dir=__UpperCAmelCase , config_name=__UpperCAmelCase , hash=dataset_module.hash , )
_lowerCAmelCase ="""/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__UpperCAmelCase ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
_lowerCAmelCase =cached_path(__UpperCAmelCase , cache_dir=__UpperCAmelCase )
self.assertTrue(os.path.exists(__UpperCAmelCase ) )
@pytest.mark.integration
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase =tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
_lowerCAmelCase =dataset_module_factory("""wikipedia""" , cache_dir=__UpperCamelCase )
_lowerCAmelCase =import_main_class(dataset_module.module_path )
_lowerCAmelCase =builder_cls(
cache_dir=__UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_lowerCAmelCase =None
builder_instance.download_and_prepare()
_lowerCAmelCase =builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]:
_lowerCAmelCase =dataset_module_factory("""wikipedia""" , cache_dir=__UpperCamelCase )
_lowerCAmelCase =import_main_class(dataset_module.module_path , dataset=__UpperCamelCase )
_lowerCAmelCase =builder_cls(
cache_dir=__UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , )
_lowerCAmelCase =builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert "train" in ds
assert isinstance(ds["""train"""] , __UpperCamelCase )
assert next(iter(ds["""train"""] ) )
| 350 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 341 | 0 |
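# Doubly linked list: Node stores a value plus previous/next pointers, LinkedListIterator walks the
# list from head to tail, and LinkedList keeps head/tail references with insert, search and delete helpers.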
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]:
_lowerCAmelCase =data
_lowerCAmelCase =previous
_lowerCAmelCase =next_node
def __str__( self ) -> str:
return f'''{self.data}'''
def _lowerCAmelCase ( self ) -> int:
return self.data
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return self.next
def _lowerCAmelCase ( self ) -> Dict:
return self.previous
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =head
def __iter__( self ) -> Union[str, Any]:
return self
def _lowerCAmelCase ( self ) -> List[Any]:
if not self.current:
raise StopIteration
else:
_lowerCAmelCase =self.current.get_data()
_lowerCAmelCase =self.current.get_next()
return value
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Tuple:
_lowerCAmelCase =None # First node in list
_lowerCAmelCase =None # Last node in list
def __str__( self ) -> Union[str, Any]:
_lowerCAmelCase =self.head
_lowerCAmelCase =[]
while current is not None:
nodes.append(current.get_data() )
_lowerCAmelCase =current.get_next()
return " ".join(str(__UpperCAmelCase ) for node in nodes )
def __contains__( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =self.head
while current:
if current.get_data() == value:
return True
_lowerCAmelCase =current.get_next()
return False
def __iter__( self ) -> int:
return LinkedListIterator(self.head )
def _lowerCAmelCase ( self ) -> Optional[int]:
if self.head:
return self.head.get_data()
return None
def _lowerCAmelCase ( self ) -> int:
if self.tail:
return self.tail.get_data()
return None
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
if self.head is None:
_lowerCAmelCase =node
_lowerCAmelCase =node
else:
self.insert_before_node(self.head , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
if self.head is None:
self.set_head(__UpperCAmelCase )
else:
self.insert_after_node(self.tail , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
_lowerCAmelCase =Node(__UpperCAmelCase )
if self.head is None:
self.set_head(__UpperCAmelCase )
else:
self.set_tail(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =node
_lowerCAmelCase =node.previous
if node.get_previous() is None:
_lowerCAmelCase =node_to_insert
else:
_lowerCAmelCase =node_to_insert
_lowerCAmelCase =node_to_insert
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =node
_lowerCAmelCase =node.next
if node.get_next() is None:
_lowerCAmelCase =node_to_insert
else:
_lowerCAmelCase =node_to_insert
_lowerCAmelCase =node_to_insert
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =1
_lowerCAmelCase =Node(__UpperCAmelCase )
_lowerCAmelCase =self.head
while node:
if current_position == position:
self.insert_before_node(__UpperCAmelCase , __UpperCAmelCase )
return
current_position += 1
_lowerCAmelCase =node.next
self.insert_after_node(self.tail , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Node:
_lowerCAmelCase =self.head
while node:
if node.get_data() == item:
return node
_lowerCAmelCase =node.get_next()
raise Exception("""Node not found""" )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict:
if (node := self.get_node(__UpperCAmelCase )) is not None:
if node == self.head:
_lowerCAmelCase =self.head.get_next()
if node == self.tail:
_lowerCAmelCase =self.tail.get_previous()
self.remove_node_pointers(__UpperCAmelCase )
@staticmethod
def _lowerCAmelCase ( __UpperCAmelCase ) -> None:
if node.get_next():
_lowerCAmelCase =node.previous
if node.get_previous():
_lowerCAmelCase =node.next
_lowerCAmelCase =None
_lowerCAmelCase =None
def _lowerCAmelCase ( self ) -> Optional[Any]:
return self.head is None
def _lowerCamelCase() -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''cvt'''
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =num_channels
_lowerCAmelCase =patch_sizes
_lowerCAmelCase =patch_stride
_lowerCAmelCase =patch_padding
_lowerCAmelCase =embed_dim
_lowerCAmelCase =num_heads
_lowerCAmelCase =depth
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =attention_drop_rate
_lowerCAmelCase =drop_rate
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =qkv_bias
_lowerCAmelCase =cls_token
_lowerCAmelCase =qkv_projection_method
_lowerCAmelCase =kernel_qkv
_lowerCAmelCase =padding_kv
_lowerCAmelCase =stride_kv
_lowerCAmelCase =padding_q
_lowerCAmelCase =stride_q
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
| 341 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
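# Removes fairseq bookkeeping keys (version counters, _float_tensor) and the decoder output
# projection, which is recreated from the shared embedding weights for fine-tuned checkpoints.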
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
_lowerCAmelCase =[
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
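# Builds a bias-free linear layer whose weight tensor is shared with the given embedding matrix.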
def _lowerCamelCase(__UpperCamelCase ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =emb.weight.shape
_lowerCAmelCase =nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
_lowerCAmelCase =emb.weight.data
return lin_layer
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase="facebook/mbart-large-en-ro" , __UpperCamelCase=False , __UpperCamelCase=False ) -> str:
_lowerCAmelCase =torch.load(__UpperCamelCase , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(__UpperCamelCase )
_lowerCAmelCase =state_dict["""encoder.embed_tokens.weight"""].shape[0]
_lowerCAmelCase =MBartConfig.from_pretrained(__UpperCamelCase , vocab_size=__UpperCamelCase )
if mbart_aa and finetuned:
_lowerCAmelCase ="""relu"""
_lowerCAmelCase =state_dict["""decoder.embed_tokens.weight"""]
_lowerCAmelCase =MBartForConditionalGeneration(__UpperCamelCase )
model.model.load_state_dict(__UpperCamelCase )
if finetuned:
_lowerCAmelCase =make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
__A = parser.parse_args()
__A = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 352 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''CLIPImageProcessor'''
lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be None.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 341 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_lowerCAmelCase =jieba
_lowerCAmelCase =str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> int:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =super()._decode(*__UpperCAmelCase , **__UpperCAmelCase )
_lowerCAmelCase =text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 353 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['PerceiverFeatureExtractor']
__A = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__A = logging.get_logger(__name__)
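# Zero-shot object detection pipeline: preprocess yields one text+image input per candidate label,
# the model is run once per label, and postprocess merges the per-label detections and sorts them by score.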
@add_end_docstrings(__magic_name__ )
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , **__UpperCAmelCase ) -> Optional[int]:
super().__init__(**__UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , """vision""" )
self.check_model_type(__UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> str:
if "text_queries" in kwargs:
_lowerCAmelCase =kwargs.pop("""text_queries""" )
if isinstance(__UpperCAmelCase , (str, Image.Image) ):
_lowerCAmelCase ={"""image""": image, """candidate_labels""": candidate_labels}
else:
_lowerCAmelCase =image
_lowerCAmelCase =super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
return results
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ={}
if "threshold" in kwargs:
_lowerCAmelCase =kwargs["""threshold"""]
if "top_k" in kwargs:
_lowerCAmelCase =kwargs["""top_k"""]
return {}, {}, postprocess_params
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =load_image(inputs["""image"""] )
_lowerCAmelCase =inputs["""candidate_labels"""]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_lowerCAmelCase =candidate_labels.split(""",""" )
_lowerCAmelCase =torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(__UpperCAmelCase ):
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=self.framework )
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(__UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
_lowerCAmelCase =model_inputs.pop("""target_size""" )
_lowerCAmelCase =model_inputs.pop("""candidate_label""" )
_lowerCAmelCase =model_inputs.pop("""is_last""" )
_lowerCAmelCase =self.model(**__UpperCAmelCase )
_lowerCAmelCase ={"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0.1 , __UpperCAmelCase=None ) -> Tuple:
_lowerCAmelCase =[]
for model_output in model_outputs:
_lowerCAmelCase =model_output["""candidate_label"""]
_lowerCAmelCase =BaseModelOutput(__UpperCAmelCase )
_lowerCAmelCase =self.image_processor.post_process_object_detection(
outputs=__UpperCAmelCase , threshold=__UpperCAmelCase , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
_lowerCAmelCase =outputs["""scores"""][index].item()
_lowerCAmelCase =self._get_bounding_box(outputs["""boxes"""][index][0] )
_lowerCAmelCase ={"""score""": score, """label""": label, """box""": box}
results.append(__UpperCAmelCase )
_lowerCAmelCase =sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )
if top_k:
_lowerCAmelCase =results[:top_k]
return results
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =box.int().tolist()
_lowerCAmelCase ={
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = FunnelTokenizer
lowerCamelCase = FunnelTokenizerFast
lowerCamelCase = True
lowerCamelCase = True
def _lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_lowerCAmelCase =[
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> List[str]:
return FunnelTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Tuple:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase ="""UNwant\u00E9d,running"""
_lowerCAmelCase ="""unwanted, running"""
return input_text, output_text
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer_class(self.vocab_file )
_lowerCAmelCase =tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__UpperCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.get_tokenizers(do_lower_case=__UpperCAmelCase )
for tokenizer in tokenizers:
_lowerCAmelCase =tokenizer("""UNwant\u00E9d,running""" )
_lowerCAmelCase =len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
_lowerCAmelCase =tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
| 355 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple:
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
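# Builds old->new name mappings for ResNet layers: in_layers/out_layers/emb_layers/skip_connection
# entries of the original checkpoint become norm1/conv1, norm2/conv2, time_emb_proj and conv_shortcut.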
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" )
_lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" )
_lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" )
_lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" )
_lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
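# Same idea for attention layers: norm and proj_out are renamed to group_norm and proj_attn.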
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item
_lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" )
_lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" )
_lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
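# Copies tensors from the old checkpoint into the new naming scheme; when attention_paths_to_split
# is given, fused qkv tensors are first split into separate query/key/value projections.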
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowerCAmelCase =old_checkpoint[path]
_lowerCAmelCase =old_tensor.shape[0] // 3
_lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3
_lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 )
_lowerCAmelCase =query.reshape(__UpperCamelCase )
_lowerCAmelCase =key.reshape(__UpperCamelCase )
_lowerCAmelCase =value.reshape(__UpperCamelCase )
for path in paths:
_lowerCAmelCase =path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0]
else:
_lowerCAmelCase =old_checkpoint[path["""old"""]]
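# Rebuilds the UNet state dict in the diffusers layout: top-level tensors are copied directly, then
# the input, middle and output blocks are renamed (and their fused qkv weights split).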
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase ={}
_lowerCAmelCase =checkpoint["""time_embed.0.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.0.bias"""]
_lowerCAmelCase =checkpoint["""time_embed.2.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.2.bias"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""]
_lowerCAmelCase =checkpoint["""out.0.weight"""]
_lowerCAmelCase =checkpoint["""out.0.bias"""]
_lowerCAmelCase =checkpoint["""out.2.weight"""]
_lowerCAmelCase =checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
_lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
_lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
_lowerCAmelCase =middle_blocks[0]
_lowerCAmelCase =middle_blocks[1]
_lowerCAmelCase =middle_blocks[2]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
_lowerCAmelCase =i // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =i % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
_lowerCAmelCase ={}
for layer in output_block_layers:
_lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
_lowerCAmelCase =[layer_name]
if len(__UpperCamelCase ) > 1:
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
_lowerCAmelCase =[]
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
_lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
_lowerCAmelCase =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__A = parser.parse_args()
__A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A = json.loads(f.read())
__A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__A = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 341 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''xlm-roberta-xl'''
def __init__( self , __UpperCAmelCase=25_08_80 , __UpperCAmelCase=25_60 , __UpperCAmelCase=36 , __UpperCAmelCase=32 , __UpperCAmelCase=1_02_40 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_14 , __UpperCAmelCase=1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-05 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 356 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
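    # Brute-force count: check every pair (i, j) with i < j and count those with arr[i] > arr[j]; O(n^2).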
_lowerCAmelCase =0
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
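    # Divide-and-conquer count: recursively count inversions in each half, then add the
    # inversions that cross the split while merging; returns (sorted_list, inversion_count).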
if len(__UpperCamelCase ) <= 1:
return arr, 0
_lowerCAmelCase =len(__UpperCamelCase ) // 2
_lowerCAmelCase =arr[0:mid]
_lowerCAmelCase =arr[mid:]
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
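    # Merge the two sorted halves p and q while counting cross inversions: when p[i] > q[j],
    # every element still left in p also forms an inversion with q[j].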
_lowerCAmelCase =[]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for every k with i <= k < len(p).
            # These are all inversions, because p is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _lowerCamelCase() -> str:
_lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
# an empty list should also have zero inversions
_lowerCAmelCase =[]
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
if __name__ == "__main__":
main()
| 341 | 0 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__A = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
__A = []
__A = []
__A = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
__A = [
{
'type': 'header',
'text': {
'type': 'plain_text',
            'text': F"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
'emoji': True,
},
}
]
__A = 0
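# Each *.log file contains one JSON record per line (pytest report logs); collect the failed tests per file.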
for log in Path().glob('*.log'):
__A = 0
with open(log, 'r') as f:
for line in f:
__A = json.loads(line)
if line.get('nodeid', '') != "":
__A = line['nodeid']
if line.get('duration', None) is not None:
                __A = F"""{line['duration']:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__A = []
log.unlink()
__A = ''
__A = []
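# Build the Slack message body: per-suite failure counts plus a table of files with failing tests.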
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
__A = []
__A = {}
for test in failed_tests:
__A = test[0].split('::')
__A = data[0].split('/')[-1]
if data[0] not in filesafailed:
__A = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__A = [test[0] for test in failed_table]
__A = list(set(files))
# Count number of instances in failed_tests
__A = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__A = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__A = 'Too many failed tests, please see the full report in the Action results.'
__A = len(err) + 10
__A = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
__A = 'No failed tests! 🤗'
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
__A = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
__A = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
__A = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
            'url': F"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
__A = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
                'text': F"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
__A = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
__A = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__A = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
__A = row[0]
else:
__A = ''
__A = {
'type': 'section',
'text': {
'type': 'mrkdwn',
                    'text': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 357 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
| 341 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__A = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]:
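    # Sanity-check the distillation arguments: exactly one of the MLM / CLM objectives must be active,
    # the student/teacher pair must be a supported combination, and the loss weights must be
    # non-negative with a positive sum.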
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str:
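    # Freeze the student's positional embeddings (no gradient updates) for the supported architectures.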
if args.student_type == "roberta":
_lowerCAmelCase =False
elif args.student_type == "gpt2":
_lowerCAmelCase =False
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
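    # Freeze the student's token type embeddings (only relevant for RoBERTa-style students).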
if args.student_type == "roberta":
_lowerCAmelCase =False
def _lowerCamelCase() -> Any:
_lowerCAmelCase =argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=__UpperCamelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__UpperCamelCase , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=__UpperCamelCase , type=__UpperCamelCase , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__UpperCamelCase , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=__UpperCamelCase , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=__UpperCamelCase , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=__UpperCamelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=__UpperCamelCase , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=__UpperCamelCase , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=__UpperCamelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=__UpperCamelCase , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=__UpperCamelCase , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=__UpperCamelCase , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=__UpperCamelCase , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=__UpperCamelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=__UpperCamelCase , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=__UpperCamelCase , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=__UpperCamelCase , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__UpperCamelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=__UpperCamelCase , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__UpperCamelCase , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=__UpperCamelCase , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__UpperCamelCase , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__UpperCamelCase , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=__UpperCamelCase , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=__UpperCamelCase , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=__UpperCamelCase , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=__UpperCamelCase , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=__UpperCamelCase , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=__UpperCamelCase , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=__UpperCamelCase , default=4000 , help="""Checkpoint interval.""" )
_lowerCAmelCase =parser.parse_args()
sanity_checks(__UpperCamelCase )
# ARGS #
init_gpu_params(__UpperCamelCase )
set_seed(__UpperCamelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                """ it. Use `--force` if you want to overwrite it."""
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(__UpperCamelCase ) , __UpperCamelCase , indent=4 )
git_log(args.dump_path )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =MODEL_CLASSES[args.student_type]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_lowerCAmelCase =teacher_tokenizer_class.from_pretrained(args.teacher_name )
_lowerCAmelCase ={}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_lowerCAmelCase =tokenizer.all_special_tokens.index(__UpperCamelCase )
_lowerCAmelCase =tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
_lowerCAmelCase =special_tok_ids
_lowerCAmelCase =tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , """rb""" ) as fp:
_lowerCAmelCase =pickle.load(__UpperCamelCase )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , """rb""" ) as fp:
_lowerCAmelCase =pickle.load(__UpperCamelCase )
_lowerCAmelCase =np.maximum(__UpperCamelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_lowerCAmelCase =0.0 # do not predict special tokens
_lowerCAmelCase =torch.from_numpy(__UpperCamelCase )
else:
_lowerCAmelCase =None
_lowerCAmelCase =LmSeqsDataset(params=__UpperCamelCase , data=__UpperCamelCase )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
_lowerCAmelCase =student_config_class.from_pretrained(args.student_config )
_lowerCAmelCase =True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
_lowerCAmelCase =student_model_class.from_pretrained(args.student_pretrained_weights , config=__UpperCamelCase )
else:
_lowerCAmelCase =student_model_class(__UpperCamelCase )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info("""Student loaded.""" )
# TEACHER #
_lowerCAmelCase =teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__UpperCamelCase )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__UpperCamelCase , __UpperCamelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__UpperCamelCase , __UpperCamelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_lowerCAmelCase =Distiller(
params=__UpperCamelCase , dataset=__UpperCamelCase , token_probs=__UpperCamelCase , student=__UpperCamelCase , teacher=__UpperCamelCase )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
| 358 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
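    # OR gate: returns 1 if at least one input is 1, otherwise 0.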
return int((input_a, input_a).count(1 ) != 0 )
def _lowerCamelCase() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 341 | 0 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Union[str, Any]:
_lowerCAmelCase ={}
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> List[str]:
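        # Add a directed edge u -> v with weight w (default 1), creating vertex entries for u and v as needed.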
if self.graph.get(__UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_lowerCAmelCase =[[w, v]]
if not self.graph.get(__UpperCAmelCase ):
_lowerCAmelCase =[]
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return list(self.graph )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict:
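        # Iterative depth-first search starting at s (default: first vertex); returns the list of
        # visited vertices, stopping early once destination d is reached.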
if s == d:
return []
_lowerCAmelCase =[]
_lowerCAmelCase =[]
if s == -2:
_lowerCAmelCase =list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_lowerCAmelCase =s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1]
else:
_lowerCAmelCase =ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _lowerCAmelCase ( self , __UpperCAmelCase=-1 ) -> Optional[int]:
if c == -1:
_lowerCAmelCase =floor(random() * 1_00_00 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_lowerCAmelCase =floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
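        # Breadth-first traversal from s using a queue; returns the vertices in visit order.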
_lowerCAmelCase =deque()
_lowerCAmelCase =[]
if s == -2:
_lowerCAmelCase =list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_lowerCAmelCase =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase =0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> int:
return len(self.graph[u] )
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> Dict:
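        # DFS-based topological sort: a vertex is appended to the result once all of its children
        # have been explored.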
_lowerCAmelCase =[]
_lowerCAmelCase =[]
if s == -2:
_lowerCAmelCase =list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_lowerCAmelCase =s
_lowerCAmelCase =[]
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase =node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1]
else:
_lowerCAmelCase =ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_lowerCAmelCase =-2
_lowerCAmelCase =[]
_lowerCAmelCase =s
_lowerCAmelCase =False
_lowerCAmelCase =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCAmelCase =len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase =True
if len(__UpperCAmelCase ) != 0:
_lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1]
else:
_lowerCAmelCase =False
indirect_parents.append(__UpperCAmelCase )
_lowerCAmelCase =s
_lowerCAmelCase =ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_lowerCAmelCase =-2
_lowerCAmelCase =[]
_lowerCAmelCase =s
_lowerCAmelCase =False
_lowerCAmelCase =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCAmelCase =len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase =True
if len(__UpperCAmelCase ) != 0:
_lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1]
else:
_lowerCAmelCase =False
indirect_parents.append(__UpperCAmelCase )
_lowerCAmelCase =s
_lowerCAmelCase =ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]:
_lowerCAmelCase =time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =time()
return end - begin
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> str:
_lowerCAmelCase =time()
self.bfs(__UpperCAmelCase )
_lowerCAmelCase =time()
return end - begin
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Any:
_lowerCAmelCase ={}
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Optional[Any]:
# check if the u exists
if self.graph.get(__UpperCAmelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_lowerCAmelCase =[[w, v]]
# add the other way
if self.graph.get(__UpperCAmelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
_lowerCAmelCase =[[w, u]]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__UpperCAmelCase )
# the other way round
if self.graph.get(__UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> List[Any]:
if s == d:
return []
_lowerCAmelCase =[]
_lowerCAmelCase =[]
if s == -2:
_lowerCAmelCase =list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_lowerCAmelCase =s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1]
else:
_lowerCAmelCase =ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _lowerCAmelCase ( self , __UpperCAmelCase=-1 ) -> str:
if c == -1:
_lowerCAmelCase =floor(random() * 1_00_00 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
_lowerCAmelCase =floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]:
_lowerCAmelCase =deque()
_lowerCAmelCase =[]
if s == -2:
_lowerCAmelCase =list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_lowerCAmelCase =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
return len(self.graph[u] )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_lowerCAmelCase =-2
_lowerCAmelCase =[]
_lowerCAmelCase =s
_lowerCAmelCase =False
_lowerCAmelCase =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCAmelCase =len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase =True
if len(__UpperCAmelCase ) != 0:
_lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1]
else:
_lowerCAmelCase =False
indirect_parents.append(__UpperCAmelCase )
_lowerCAmelCase =s
_lowerCAmelCase =ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_lowerCAmelCase =-2
_lowerCAmelCase =[]
_lowerCAmelCase =s
_lowerCAmelCase =False
_lowerCAmelCase =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowerCAmelCase =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowerCAmelCase =len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowerCAmelCase =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase =True
if len(__UpperCAmelCase ) != 0:
_lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1]
else:
_lowerCAmelCase =False
indirect_parents.append(__UpperCAmelCase )
_lowerCAmelCase =s
_lowerCAmelCase =ss
            # check if we have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _lowerCAmelCase ( self ) -> List[Any]:
return list(self.graph )
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[Any]:
_lowerCAmelCase =time()
self.dfs(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =time()
return end - begin
def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_lowerCAmelCase =time()
self.bfs(__UpperCAmelCase )
_lowerCAmelCase =time()
return end - begin
| 359 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple:
_lowerCAmelCase =compute_bleu(
reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 341 | 0 |
"""simple docstring"""
from math import pow
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> tuple[int, int]:
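    # Backtracking step: add current_number**power to the running sum if it still fits, recurse on
    # the next number, then undo the choice and also try skipping straight to the next number.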
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
_lowerCAmelCase =int(pow(__UpperCamelCase , __UpperCamelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
_lowerCAmelCase , _lowerCAmelCase =backtrack(
__UpperCamelCase , __UpperCamelCase , current_number + 1 , __UpperCamelCase , __UpperCamelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
_lowerCAmelCase , _lowerCAmelCase =backtrack(
__UpperCamelCase , __UpperCamelCase , current_number + 1 , __UpperCamelCase , __UpperCamelCase )
return current_sum, solutions_count
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
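    # Count how many ways needed_sum can be written as a sum of distinct natural numbers each raised to `power`.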
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(__UpperCamelCase , __UpperCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 341 | 0 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__A = sys.version_info >= (3, 10)
def _lowerCamelCase(__UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[Any]:
return field(default_factory=lambda: default , metadata=__UpperCamelCase )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = 42
lowerCamelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''titi'''
lowerCamelCase = '''toto'''
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''titi'''
lowerCamelCase = '''toto'''
lowerCamelCase = 42
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = '''toto'''
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =BasicEnum(self.foo )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = '''toto'''
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =MixedTypeEnum(self.foo )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = field(default=__magic_name__ , metadata={'''help''': '''help message'''} )
lowerCamelCase = None
lowerCamelCase = list_field(default=[] )
lowerCamelCase = list_field(default=[] )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = list_field(default=[] )
lowerCamelCase = list_field(default=[1, 2, 3] )
lowerCamelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
lowerCamelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = field()
lowerCamelCase = field()
lowerCamelCase = field()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =BasicEnum(self.required_enum )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = 42
lowerCamelCase = field()
lowerCamelCase = None
lowerCamelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} )
lowerCamelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = field(default=__magic_name__ , metadata={'''help''': '''help message'''} )
lowerCamelCase = None
lowerCamelCase = list_field(default=[] )
lowerCamelCase = list_field(default=[] )
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
_lowerCAmelCase ={k: v for k, v in vars(__UpperCAmelCase ).items() if k != """container"""}
_lowerCAmelCase ={k: v for k, v in vars(__UpperCAmelCase ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , __UpperCAmelCase ) and yy.get("""choices""" , __UpperCAmelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](__UpperCAmelCase ) , yy["""type"""](__UpperCAmelCase ) )
del xx["type"], yy["type"]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument("""--bar""" , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument("""--baz""" , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument("""--flag""" , type=__UpperCAmelCase , default=__UpperCAmelCase , const=__UpperCAmelCase , nargs="""?""" )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((_lowerCAmelCase ) , ) =parser.parse_args_into_dataclasses(__UpperCAmelCase , look_for_args_file=__UpperCAmelCase )
self.assertFalse(example.flag )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=__UpperCAmelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=__UpperCAmelCase , help="""help message""" )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__UpperCAmelCase , default=__UpperCAmelCase , const=__UpperCAmelCase , nargs="""?""" )
expected.add_argument("""--baz""" , type=__UpperCAmelCase , default=__UpperCAmelCase , const=__UpperCAmelCase , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=__UpperCAmelCase , dest="""baz""" )
expected.add_argument("""--opt""" , type=__UpperCAmelCase , default=__UpperCAmelCase )
_lowerCAmelCase =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCAmelCase )
for dataclass_type in dataclass_types:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
_lowerCAmelCase =parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
_lowerCAmelCase =parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
_lowerCAmelCase =parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
_lowerCAmelCase =parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , baz=__UpperCAmelCase , opt=__UpperCAmelCase ) )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
_lowerCAmelCase =parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_lowerCAmelCase =parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
_lowerCAmelCase =parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_lowerCAmelCase =parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
_lowerCAmelCase =parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _lowerCAmelCase ( self ) -> List[Any]:
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = '''toto'''
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
_lowerCAmelCase =parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
_lowerCAmelCase =parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=__UpperCAmelCase )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=__UpperCAmelCase )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=__UpperCAmelCase )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(
__UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
_lowerCAmelCase =parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(__UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=__UpperCAmelCase , type=__UpperCAmelCase )
expected.add_argument("""--bar""" , default=__UpperCAmelCase , type=__UpperCAmelCase , help="""help message""" )
expected.add_argument("""--baz""" , default=__UpperCAmelCase , type=__UpperCAmelCase )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=__UpperCAmelCase )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=__UpperCAmelCase )
_lowerCAmelCase =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCAmelCase )
for dataclass_type in dataclass_types:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(__UpperCAmelCase , Namespace(foo=__UpperCAmelCase , bar=__UpperCAmelCase , baz=__UpperCAmelCase , ces=[] , des=[] ) )
_lowerCAmelCase =parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(__UpperCAmelCase , Namespace(foo=12 , bar=3.1_4 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument("""--required_str""" , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=__UpperCAmelCase , )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__UpperCAmelCase , required=__UpperCAmelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=__UpperCAmelCase , )
expected.add_argument("""--opt""" , type=__UpperCAmelCase , default=__UpperCAmelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=__UpperCAmelCase , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=__UpperCAmelCase )
self.argparsersEqual(__UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase ={
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
_lowerCAmelCase =parser.parse_dict(__UpperCAmelCase )[0]
_lowerCAmelCase =BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase ={
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(__UpperCAmelCase , parser.parse_dict , __UpperCAmelCase , allow_extra_keys=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase ={
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =os.path.join(__UpperCAmelCase , """temp_json""" )
os.mkdir(__UpperCAmelCase )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
_lowerCAmelCase =BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
_lowerCAmelCase ={
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =os.path.join(__UpperCAmelCase , """temp_yaml""" )
os.mkdir(__UpperCAmelCase )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
_lowerCAmelCase =BasicExample(**__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =HfArgumentParser(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__A = ['bert-base-uncased', 'bert-base-cased']
__A = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class lowerCamelCase__ ( tf.keras.Model ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =tokenizer
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =TFAutoModel.from_config(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Any:
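# Tokenize inside the model graph, run BERT on the result, and return the pooled output so the whole pipeline can be saved together.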
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase )
_lowerCAmelCase =self.bert(**__UpperCAmelCase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =[
BertTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_lowerCAmelCase =[TFBertTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__UpperCAmelCase , use_fast_bert_tokenizer=__UpperCAmelCase )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_lowerCAmelCase =[
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
_lowerCAmelCase =list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _lowerCAmelCase ( self ) -> str:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding="""longest""" )
_lowerCAmelCase =tf_tokenizer(__UpperCAmelCase )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase =tf_tokenizer(self.paired_sentences )
_lowerCAmelCase =tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase =tf.function(__UpperCAmelCase )
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase =tf.constant(__UpperCAmelCase )
_lowerCAmelCase =compiled_tokenizer(__UpperCAmelCase )
_lowerCAmelCase =tf_tokenizer(__UpperCAmelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase =ModelToSave(tokenizer=__UpperCAmelCase )
_lowerCAmelCase =tf.convert_to_tensor(self.test_sentences )
_lowerCAmelCase =model(__UpperCAmelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCAmelCase =Path(__UpperCAmelCase ) / """saved.model"""
model.save(__UpperCAmelCase )
_lowerCAmelCase =tf.keras.models.load_model(__UpperCAmelCase )
_lowerCAmelCase =loaded_model(__UpperCAmelCase )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
"""simple docstring"""
from __future__ import annotations
__A = 'Muhammad Umer Farooq'
__A = 'MIT'
__A = '1.0.0'
__A = 'Muhammad Umer Farooq'
__A = '[email protected]'
__A = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> None:
super().__init__()
_lowerCAmelCase =[]
_lowerCAmelCase =domain
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, not empty, and not just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_lowerCAmelCase =parse.urljoin(self.domain , __UpperCAmelCase )
self.urls.append(__UpperCAmelCase )
def _lowerCamelCase(__UpperCamelCase ) -> str:
return ".".join(get_sub_domain_name(__UpperCamelCase ).split(""".""" )[-2:] )
def _lowerCamelCase(__UpperCamelCase ) -> str:
return parse.urlparse(__UpperCamelCase ).netloc
def _lowerCamelCase(__UpperCamelCase = "https://github.com" ) -> list[str]:
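# Fetch the page, gather the links it contains, then scan each linked page for e-mail addresses at the page's domain.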
_lowerCAmelCase =get_domain_name(__UpperCamelCase )
# Initialize the parser
_lowerCAmelCase =Parser(__UpperCamelCase )
try:
# Open URL
_lowerCAmelCase =requests.get(__UpperCamelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_lowerCAmelCase =set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_lowerCAmelCase =requests.get(__UpperCamelCase )
# Get the valid email.
_lowerCAmelCase =re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__UpperCamelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__UpperCamelCase )
if __name__ == "__main__":
__A = emails_from_url('https://github.com')
print(F"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails)))
| 363 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict:
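# Extract key (gold) and system mentions/clusters for the single document, optionally dropping nested mentions and singletons, and pair them up for scoring.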
_lowerCAmelCase ={doc: key_lines}
_lowerCAmelCase ={doc: sys_lines}
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
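# Run every requested metric on the extracted coreference info; the MUC, B-cubed and CEAFe F1 values are averaged into the CoNLL score.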
_lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_lowerCAmelCase =(conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
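# A gold parse is considered present when column 6 (the parse bit) of some non-comment CoNLL line differs from "-".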
_lowerCAmelCase =False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase =line.split()[5]
if not parse_col == "-":
_lowerCAmelCase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =[
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase =evaluate(
key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , )
return score
| 341 | 0 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
__A = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
__A = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
__A = tf.keras.preprocessing.image.img_to_array(test_image)
__A = np.expand_dims(test_image, axis=0)
__A = classifier.predict(test_image)
# training_set.class_indices
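# predict() returns a sigmoid probability, so the exact comparisons below only label outputs that are exactly 0 or 1.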
if result[0][0] == 0:
__A = 'Normal'
if result[0][0] == 1:
__A = 'Abnormality detected'
| 364 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = XGLMConfig
lowerCamelCase = {}
lowerCamelCase = '''gelu'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =ffn_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Dict:
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase ( self ) -> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =TFXGLMModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def _lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
_lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =(
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase ="""left"""
# use different length sentences to test batching
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase )
_lowerCAmelCase =inputs["""input_ids"""]
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
| 341 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__A = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behaves like a normal word, i.e. includes the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
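# Clean the raw text: optionally collapse whitespace, normalize quote characters, strip accents and lowercase.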
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 341 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''pixel_values''']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 2_55 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =resample
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCAmelCase =image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCAmelCase =do_convert_rgb
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
_lowerCAmelCase =(size["""height"""], size["""width"""])
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> List[str]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
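# Arguments left as None fall back to the processor's defaults before the images are validated and transformed.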
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase =image_std if image_std is not None else self.image_std
_lowerCAmelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_lowerCAmelCase =make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCAmelCase =[convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase =[self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase =[self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase =[self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
_lowerCAmelCase =[to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
_lowerCAmelCase =BatchFeature(data={"""pixel_values""": images} , tensor_type=__UpperCAmelCase )
return encoded_outputs
| 366 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase ) -> bool:
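# True when the string form of n reads the same forwards and backwards.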
_lowerCAmelCase =str(__UpperCamelCase )
return n == n[::-1]
def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> int:
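# Sum every number below the limit whose base-10 and base-2 representations are both palindromes (double-base palindromes).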
_lowerCAmelCase =0
for i in range(1 , __UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 341 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__A = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__A = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
__A = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> List[str]:
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = CHRF.CHAR_ORDER , __UpperCAmelCase = CHRF.WORD_ORDER , __UpperCAmelCase = CHRF.BETA , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , ) -> Optional[Any]:
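# sacrebleu expects the references transposed: one list per reference index rather than one list per prediction.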
_lowerCAmelCase =len(references[0] )
if any(len(__UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
_lowerCAmelCase =[[refs[i] for refs in references] for i in range(__UpperCAmelCase )]
_lowerCAmelCase =CHRF(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =sb_chrf.corpus_score(__UpperCAmelCase , __UpperCAmelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 367 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''llama'''
lowerCamelCase = ['''past_key_values''']
def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_key_value_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =initializer_range
_lowerCAmelCase =rms_norm_eps
_lowerCAmelCase =pretraining_tp
_lowerCAmelCase =use_cache
_lowerCAmelCase =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> str:
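# When provided, `rope_scaling` must be a dict of the form {"type": "linear" or "dynamic", "factor": float > 1}.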
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 341 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 368 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
| 341 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''gpt_bigcode'''
lowerCamelCase = ['''past_key_values''']
lowerCamelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , __UpperCAmelCase=5_02_57 , __UpperCAmelCase=10_24 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_pytorch_tanh" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=5_02_56 , __UpperCAmelCase=5_02_56 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> Optional[int]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =n_positions
_lowerCAmelCase =n_embd
_lowerCAmelCase =n_layer
_lowerCAmelCase =n_head
_lowerCAmelCase =n_inner
_lowerCAmelCase =activation_function
_lowerCAmelCase =resid_pdrop
_lowerCAmelCase =embd_pdrop
_lowerCAmelCase =attn_pdrop
_lowerCAmelCase =layer_norm_epsilon
_lowerCAmelCase =initializer_range
_lowerCAmelCase =scale_attn_weights
_lowerCAmelCase =use_cache
_lowerCAmelCase =attention_softmax_in_fpaa
_lowerCAmelCase =scale_attention_softmax_in_fpaa
_lowerCAmelCase =multi_query
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =eos_token_id
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
| 369 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
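        # Check that decoding one new token with cached past_key_values matches a full
        # forward pass over the concatenated sequence on a randomly chosen output slice.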
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
        # create hypothetical next token and extend to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the hypothetical next tokens to input_ids
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341 | 0 |
"""simple docstring"""
from math import factorial, radians
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase = 18 , __UpperCamelCase = 10 ) -> float:
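    # Maclaurin series for sine: sin(x) = x - x^3/3! + x^5/5! - ...
    # The angle is first reduced modulo 360 degrees so the truncated series stays accurate.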
_lowerCAmelCase =angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
_lowerCAmelCase =radians(__UpperCamelCase )
_lowerCAmelCase =angle_in_radians
_lowerCAmelCase =3
_lowerCAmelCase =-1
for _ in range(__UpperCamelCase ):
result += (b * (angle_in_radians**a)) / factorial(__UpperCamelCase )
_lowerCAmelCase =-b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
__import__('doctest').testmod()
| 370 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = JukeboxTokenizer
lowerCamelCase = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _lowerCAmelCase ( self ) -> str:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 341 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
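    # Minimax over a complete binary tree of leaf scores: at maximizing depths take the
    # larger of the two child values, at minimizing depths the smaller, until a leaf is reached.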
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__UpperCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , )
def _lowerCamelCase() -> None:
_lowerCAmelCase =[90, 23, 6, 33, 21, 65, 123, 34423]
_lowerCAmelCase =math.log(len(__UpperCamelCase ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 371 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 341 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
if len(__UpperCamelCase ) < k or k < 0:
        raise ValueError("""Invalid Input: k must be non-negative and no larger than len(array)""" )
_lowerCAmelCase =_lowerCAmelCase =sum(array[:k] )
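    # Sliding window: move the window one step at a time, subtracting the element that
    # leaves and adding the element that enters, keeping the best sum seen so far.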
for i in range(len(__UpperCamelCase ) - k ):
_lowerCAmelCase =current_sum - array[i] + array[i + k]
_lowerCAmelCase =max(__UpperCamelCase , __UpperCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__A = [randint(-1000, 1000) for i in range(100)]
__A = randint(0, 110)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 350 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
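        # End-to-end smoke test of the upscale pipeline built from dummy components;
        # the output image should be 4x the low-resolution input and match a known slice.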
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components above
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components above
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
        # assemble the upscale pipeline from the dummy components above
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 341 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__A = logging.getLogger(__name__)
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple:
# save results
if os.path.exists(__UpperCamelCase ):
if os.path.exists(os.path.join(__UpperCamelCase , """config.json""" ) ) and os.path.isfile(
os.path.join(__UpperCamelCase , """config.json""" ) ):
os.remove(os.path.join(__UpperCamelCase , """config.json""" ) )
if os.path.exists(os.path.join(__UpperCamelCase , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(__UpperCamelCase , """pytorch_model.bin""" ) ):
os.remove(os.path.join(__UpperCamelCase , """pytorch_model.bin""" ) )
else:
os.makedirs(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=False ) -> Tuple:
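    # Shannon entropy -sum(p * log p) along the last dimension; with unlogit=True the
    # input is squared before the entropy is computed.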
_lowerCAmelCase =2
if unlogit:
_lowerCAmelCase =torch.pow(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =p * torch.log(__UpperCamelCase )
_lowerCAmelCase =0
return -plogp.sum(dim=-1 )
def _lowerCamelCase(__UpperCamelCase ) -> str:
logger.info("""lv, h >\t""" + """\t""".join(F'''{x + 1}''' for x in range(len(__UpperCamelCase ) ) ) )
for row in range(len(__UpperCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=False ) -> int:
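    # Iterate once over the dataset, accumulating per-head attention entropies and
    # head-importance scores taken from the gradients that flow into the head mask.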
_lowerCAmelCase , _lowerCAmelCase =model.config.num_hidden_layers, model.config.num_attention_heads
_lowerCAmelCase =torch.zeros(__UpperCamelCase , __UpperCamelCase ).to(args.device )
_lowerCAmelCase =torch.zeros(__UpperCamelCase , __UpperCamelCase ).to(args.device )
if head_mask is None:
_lowerCAmelCase =torch.ones(__UpperCamelCase , __UpperCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__UpperCamelCase )
    # If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_lowerCAmelCase =None
_lowerCAmelCase =0.0
_lowerCAmelCase =0.0
for step, inputs in enumerate(tqdm(__UpperCamelCase , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
_lowerCAmelCase =tuple(t.to(args.device ) for t in inputs )
((_lowerCAmelCase ) , ) =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_lowerCAmelCase =model(__UpperCamelCase , labels=__UpperCamelCase , head_mask=__UpperCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__UpperCamelCase ):
_lowerCAmelCase =entropy(attn.detach() , __UpperCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__UpperCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_lowerCAmelCase =2
_lowerCAmelCase =torch.pow(torch.pow(__UpperCamelCase , __UpperCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
if not args.dont_normalize_global_importance:
_lowerCAmelCase =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(__UpperCamelCase )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(__UpperCamelCase )
logger.info("""Head ranked by importance scores""" )
_lowerCAmelCase =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_lowerCAmelCase =torch.arange(
head_importance.numel() , device=args.device )
_lowerCAmelCase =head_ranks.view_as(__UpperCamelCase )
print_ad_tensor(__UpperCamelCase )
return attn_entropy, head_importance, total_loss
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
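    # Greedily zero out the least important heads, re-scoring after each step, and stop
    # once the score drops below masking_threshold times the original score.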
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =compute_heads_importance(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , compute_entropy=__UpperCamelCase )
    _lowerCAmelCase =1 / loss # instead of downstream score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , __UpperCamelCase , original_score * args.masking_threshold )
_lowerCAmelCase =torch.ones_like(__UpperCamelCase )
_lowerCAmelCase =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_lowerCAmelCase =original_score
while current_score >= original_score * args.masking_threshold:
_lowerCAmelCase =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_lowerCAmelCase =float("""Inf""" )
_lowerCAmelCase =head_importance.view(-1 ).sort()[1]
if len(__UpperCamelCase ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
_lowerCAmelCase =current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
_lowerCAmelCase =new_head_mask.view(-1 )
_lowerCAmelCase =0.0
_lowerCAmelCase =new_head_mask.view_as(__UpperCamelCase )
_lowerCAmelCase =new_head_mask.clone().detach()
print_ad_tensor(__UpperCamelCase )
# Compute metric and head importance again
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =compute_heads_importance(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , compute_entropy=__UpperCamelCase , head_mask=__UpperCamelCase )
_lowerCAmelCase =1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , __UpperCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("""Final head mask""" )
print_ad_tensor(__UpperCamelCase )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
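    # Physically remove the masked heads from the model, then compare parameter count,
    # score and wall-clock time against the masked-but-unpruned baseline.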
_lowerCAmelCase =datetime.now()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =compute_heads_importance(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , compute_entropy=__UpperCamelCase , compute_importance=__UpperCamelCase , head_mask=__UpperCamelCase )
_lowerCAmelCase =1 / loss
_lowerCAmelCase =datetime.now() - before_time
_lowerCAmelCase =sum(p.numel() for p in model.parameters() )
_lowerCAmelCase ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__UpperCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =[
v,
]
assert sum(len(__UpperCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__UpperCamelCase )
_lowerCAmelCase =sum(p.numel() for p in model.parameters() )
_lowerCAmelCase =datetime.now()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =compute_heads_importance(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , compute_entropy=__UpperCamelCase , compute_importance=__UpperCamelCase , head_mask=__UpperCamelCase , actually_pruned=__UpperCamelCase , )
_lowerCAmelCase =1 / loss
_lowerCAmelCase =datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , __UpperCamelCase , __UpperCamelCase , pruned_num_params / original_num_params * 100 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , __UpperCamelCase , __UpperCamelCase )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 )
save_model(__UpperCamelCase , args.output_dir )
def _lowerCamelCase() -> int:
_lowerCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=__UpperCamelCase , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=__UpperCamelCase , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=__UpperCamelCase , type=__UpperCamelCase , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=__UpperCamelCase , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=__UpperCamelCase , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=__UpperCamelCase , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=__UpperCamelCase , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__UpperCamelCase , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=__UpperCamelCase , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=__UpperCamelCase , default=42 )
parser.add_argument("""--local_rank""" , type=__UpperCamelCase , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=__UpperCamelCase , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=__UpperCamelCase , default="""""" , help="""Can be used for distant debugging.""" )
_lowerCAmelCase =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__UpperCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_lowerCAmelCase =torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
_lowerCAmelCase =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_lowerCAmelCase =torch.device("""cuda""" , args.local_rank )
_lowerCAmelCase =1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_lowerCAmelCase =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_lowerCAmelCase =nn.parallel.DistributedDataParallel(
__UpperCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__UpperCamelCase )
elif args.n_gpu > 1:
_lowerCAmelCase =nn.DataParallel(__UpperCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__UpperCamelCase )
torch.save(__UpperCamelCase , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , __UpperCamelCase )
# Prepare dataset
_lowerCAmelCase =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_lowerCAmelCase =(torch.from_numpy(__UpperCamelCase ),)
_lowerCAmelCase =TensorDataset(*__UpperCamelCase )
_lowerCAmelCase =RandomSampler(__UpperCamelCase )
_lowerCAmelCase =DataLoader(__UpperCamelCase , sampler=__UpperCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_lowerCAmelCase =mask_heads(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
prune_heads(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''cvt'''
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =num_channels
_lowerCAmelCase =patch_sizes
_lowerCAmelCase =patch_stride
_lowerCAmelCase =patch_padding
_lowerCAmelCase =embed_dim
_lowerCAmelCase =num_heads
_lowerCAmelCase =depth
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =attention_drop_rate
_lowerCAmelCase =drop_rate
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =qkv_bias
_lowerCAmelCase =cls_token
_lowerCAmelCase =qkv_projection_method
_lowerCAmelCase =kernel_qkv
_lowerCAmelCase =padding_kv
_lowerCAmelCase =stride_kv
_lowerCAmelCase =padding_q
_lowerCAmelCase =stride_q
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
| 341 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__A = logging.get_logger(__name__)
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None:
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 352 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''CLIPImageProcessor'''
lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 341 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _lowerCamelCase() -> int:
_lowerCAmelCase =ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=__UpperCamelCase )
_lowerCAmelCase =parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=__UpperCamelCase )
env_command_parser(subparsers=__UpperCamelCase )
launch_command_parser(subparsers=__UpperCamelCase )
tpu_command_parser(subparsers=__UpperCamelCase )
test_command_parser(subparsers=__UpperCamelCase )
# Let's go
_lowerCAmelCase =parser.parse_args()
if not hasattr(__UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
args.func(__UpperCamelCase )
if __name__ == "__main__":
main()
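# --- Added illustration (invented sub-command, same dispatch pattern as the entry point
# above): every *_command_parser registers a sub-parser and sets a `func` default, and the
# entry point calls args.func(args) once a sub-command has been chosen.
from argparse import ArgumentParser
def _hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello", help="print a greeting")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser
def _demo_main(argv):
    parser = ArgumentParser("demo CLI tool", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")
    _hello_command_parser(subparsers)
    args = parser.parse_args(argv)
    if not hasattr(args, "func"):
        parser.print_help()
        return
    args.func(args)
_demo_main(["hello", "--name", "accelerate"])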
| 353 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['PerceiverFeatureExtractor']
__A = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
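# --- Added illustration (not the real _LazyModule implementation): the same "resolve the
# import only on first attribute access" idea can be expressed with a module-level
# __getattr__ (PEP 562). The mapping below is invented for demonstration.
import importlib
_demo_import_structure = {"math": ["sqrt", "pi"], "json": ["dumps"]}
def __getattr__(name):
    for _module_name, _symbols in _demo_import_structure.items():
        if name in _symbols:
            return getattr(importlib.import_module(_module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")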
| 341 | 0 |
from math import isclose, sqrt
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> tuple[float, float, float]:
_lowerCAmelCase =point_y / 4 / point_x
_lowerCAmelCase =2 * normal_gradient / (1 + normal_gradient * normal_gradient)
_lowerCAmelCase =(1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
_lowerCAmelCase =(sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
_lowerCAmelCase =outgoing_gradient**2 + 4
_lowerCAmelCase =2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
_lowerCAmelCase =(point_y - outgoing_gradient * point_x) ** 2 - 100
_lowerCAmelCase =(
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
_lowerCAmelCase =(
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
_lowerCAmelCase =x_minus if isclose(__UpperCamelCase , __UpperCamelCase ) else x_plus
_lowerCAmelCase =point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _lowerCamelCase(__UpperCamelCase = 1.4 , __UpperCamelCase = -9.6 ) -> int:
_lowerCAmelCase =0
_lowerCAmelCase =first_x_coord
_lowerCAmelCase =first_y_coord
_lowerCAmelCase =(10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =next_point(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
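# --- Added worked check (assumption: the mirror is the ellipse 4*x**2 + y**2 == 100, the
# curve the quadratic above is derived from): the beam's entry point (1.4, -9.6) lies on
# the ellipse, and the exit gap around x == 0 sits at the top point (0, 10).
from math import isclose
assert isclose(4 * 1.4**2 + (-9.6) ** 2, 100.0)
assert isclose(4 * 0.0**2 + 10.0**2, 100.0)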
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__A = 5_0003
__A = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = PLBartTokenizer
lowerCamelCase = None
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase =PLBartTokenizer(__UpperCAmelCase , language_codes="""base""" , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =PLBartTokenizer(__UpperCAmelCase , language_codes="""base""" , keep_accents=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_lowerCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
_lowerCAmelCase =tokenizer.vocab_size
_lowerCAmelCase =[tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) for x in range(end - 4 , __UpperCAmelCase )]
self.assertListEqual(__UpperCAmelCase , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
_lowerCAmelCase ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
_lowerCAmelCase =tokenizer(__UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) , __UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =PLBartTokenizer(__UpperCAmelCase , language_codes="""multi""" , keep_accents=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_lowerCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
_lowerCAmelCase =tokenizer.vocab_size
_lowerCAmelCase =[tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) for x in range(end - 7 , __UpperCAmelCase )]
self.assertListEqual(
__UpperCAmelCase , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
_lowerCAmelCase ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
_lowerCAmelCase =tokenizer(__UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) , __UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = '''uclanlp/plbart-python-en_XX'''
lowerCamelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
lowerCamelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
lowerCamelCase = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def _lowerCAmelCase ( cls ) -> Union[str, Any]:
_lowerCAmelCase =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
_lowerCAmelCase =1
return cls
def _lowerCAmelCase ( self ) -> Tuple:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Any:
self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids )
_lowerCAmelCase =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
_lowerCAmelCase =self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , __UpperCAmelCase )
_lowerCAmelCase =10
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =tempfile.mkdtemp()
_lowerCAmelCase =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCAmelCase )
_lowerCAmelCase =PLBartTokenizer.from_pretrained(__UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase )
@require_torch
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="""pt""" )
_lowerCAmelCase =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
_lowerCAmelCase =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
_lowerCAmelCase =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors="""pt""" )
_lowerCAmelCase =self.tokenizer(
text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors="""pt""" )
_lowerCAmelCase =targets["""input_ids"""]
_lowerCAmelCase =shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
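# --- Added illustration (assumption about the convention the assertions above encode):
# PLBart-style source/label sequences end with [eos, <lang_code>], and decoder inputs are
# built by rotating the final language code to the front. Sketch for an unpadded sequence:
def _rotate_lang_code_to_front(label_ids):
    return [label_ids[-1]] + label_ids[:-1]
print(_rotate_lang_code_to_front([150, 242, 2, 50003]))  # -> [50003, 150, 242, 2]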
| 355 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple:
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" )
_lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" )
_lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" )
_lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" )
_lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item
_lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" )
_lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" )
_lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowerCAmelCase =old_checkpoint[path]
_lowerCAmelCase =old_tensor.shape[0] // 3
_lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3
_lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 )
_lowerCAmelCase =query.reshape(__UpperCamelCase )
_lowerCAmelCase =key.reshape(__UpperCamelCase )
_lowerCAmelCase =value.reshape(__UpperCamelCase )
for path in paths:
_lowerCAmelCase =path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0]
else:
_lowerCAmelCase =old_checkpoint[path["""old"""]]
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase ={}
_lowerCAmelCase =checkpoint["""time_embed.0.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.0.bias"""]
_lowerCAmelCase =checkpoint["""time_embed.2.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.2.bias"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""]
_lowerCAmelCase =checkpoint["""out.0.weight"""]
_lowerCAmelCase =checkpoint["""out.0.bias"""]
_lowerCAmelCase =checkpoint["""out.2.weight"""]
_lowerCAmelCase =checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
_lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
_lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
_lowerCAmelCase =middle_blocks[0]
_lowerCAmelCase =middle_blocks[1]
_lowerCAmelCase =middle_blocks[2]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
_lowerCAmelCase =i // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =i % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
_lowerCAmelCase ={}
for layer in output_block_layers:
_lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
_lowerCAmelCase =[layer_name]
if len(__UpperCamelCase ) > 1:
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
_lowerCAmelCase =[]
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
_lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
_lowerCAmelCase =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__A = parser.parse_args()
__A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A = json.loads(f.read())
__A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__A = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
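# --- Added illustration (invented shapes): the qkv handling in assign_to_checkpoint above
# reshapes a stacked query/key/value tensor to (num_heads, 3 * channels_per_head, ...) and
# splits it into three equal chunks along dim 1.
import torch
num_heads_demo, channels_demo = 2, 8  # channels_demo is the per-projection width
qkv_demo = torch.randn(3 * channels_demo, channels_demo)
reshaped_demo = qkv_demo.reshape(num_heads_demo, 3 * channels_demo // num_heads_demo, channels_demo)
query_demo, key_demo, value_demo = reshaped_demo.split(channels_demo // num_heads_demo, dim=1)
print(query_demo.shape, key_demo.shape, value_demo.shape)  # each torch.Size([2, 4, 8])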
| 341 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__A = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__A = 'cuda' if torch.cuda.is_available() else 'cpu'
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=100 , __UpperCamelCase=" " ) -> List[str]:
_lowerCAmelCase =text.split(__UpperCamelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
def _lowerCamelCase(__UpperCamelCase ) -> dict:
_lowerCAmelCase , _lowerCAmelCase =[], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(__UpperCamelCase ):
titles.append(title if title is not None else """""" )
texts.append(__UpperCamelCase )
return {"title": titles, "text": texts}
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> dict:
_lowerCAmelCase =ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=__UpperCamelCase , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
_lowerCAmelCase =ctx_encoder(input_ids.to(device=__UpperCamelCase ) , return_dict=__UpperCamelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> int:
######################################
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_lowerCAmelCase =load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_lowerCAmelCase =dataset.map(__UpperCamelCase , batched=__UpperCamelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
_lowerCAmelCase =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__UpperCamelCase )
_lowerCAmelCase =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_lowerCAmelCase =Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
_lowerCAmelCase =dataset.map(
partial(__UpperCamelCase , ctx_encoder=__UpperCamelCase , ctx_tokenizer=__UpperCamelCase ) , batched=__UpperCamelCase , batch_size=processing_args.batch_size , features=__UpperCamelCase , )
# And finally save your dataset
_lowerCAmelCase =os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(__UpperCamelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_lowerCAmelCase =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=__UpperCamelCase )
# And save the index
_lowerCAmelCase =os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(__UpperCamelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = field(
default=str(Path(__magic_name__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
lowerCamelCase = field(
default=__magic_name__ , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
lowerCamelCase = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
lowerCamelCase = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
lowerCamelCase = field(
default=str(Path(__magic_name__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = field(
default=__magic_name__ , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
lowerCamelCase = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
lowerCamelCase = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__A = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__A , __A , __A = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__A = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
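# --- Added follow-up sketch (assumes faiss and numpy are available, as above): once the
# HNSW index is built, the nearest passages for a query embedding come from index.search().
import faiss
import numpy as np
demo_dim = 8
demo_index = faiss.IndexHNSWFlat(demo_dim, 16, faiss.METRIC_INNER_PRODUCT)
demo_vectors = np.random.rand(32, demo_dim).astype("float32")
demo_index.add(demo_vectors)
demo_scores, demo_ids = demo_index.search(demo_vectors[:1], 3)
print(demo_ids[0], demo_scores[0])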
| 356 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
if len(__UpperCamelCase ) <= 1:
return arr, 0
_lowerCAmelCase =len(__UpperCamelCase ) // 2
_lowerCAmelCase =arr[0:mid]
_lowerCAmelCase =arr[mid:]
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
_lowerCAmelCase =[]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _lowerCamelCase() -> str:
_lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
# an empty list should also have zero inversions
_lowerCAmelCase =[]
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
if __name__ == "__main__":
main()
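# --- Added worked example (hand-checked): an inversion is a pair (i, j) with i < j and
# arr[i] > arr[j]; [3, 1, 2] has exactly two of them, (3, 1) and (3, 2).
sample_arr = [3, 1, 2]
sample_pairs = [
    (sample_arr[i], sample_arr[j])
    for i in range(len(sample_arr))
    for j in range(i + 1, len(sample_arr))
    if sample_arr[i] > sample_arr[j]
]
assert sample_pairs == [(3, 1), (3, 2)]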
| 341 | 0 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = DownBlockaD # noqa F405
lowerCamelCase = '''down'''
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =[-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = ResnetDownsampleBlockaD # noqa F405
lowerCamelCase = '''down'''
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =[0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = AttnDownBlockaD # noqa F405
lowerCamelCase = '''down'''
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =[0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = CrossAttnDownBlockaD # noqa F405
lowerCamelCase = '''down'''
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common()
_lowerCAmelCase =32
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =[0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase = '''down'''
@property
def _lowerCAmelCase ( self ) -> List[Any]:
return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common()
_lowerCAmelCase =32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =[0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = SkipDownBlockaD # noqa F405
lowerCamelCase = '''down'''
@property
def _lowerCAmelCase ( self ) -> Any:
return super().get_dummy_input(include_skip_sample=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =[-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = AttnSkipDownBlockaD # noqa F405
lowerCamelCase = '''down'''
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
return super().get_dummy_input(include_skip_sample=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =[0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = DownEncoderBlockaD # noqa F405
lowerCamelCase = '''down'''
@property
def _lowerCAmelCase ( self ) -> str:
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase ={
"""in_channels""": 32,
"""out_channels""": 32,
}
_lowerCAmelCase =self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =[1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = AttnDownEncoderBlockaD # noqa F405
lowerCamelCase = '''down'''
@property
def _lowerCAmelCase ( self ) -> Optional[int]:
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ={
"""in_channels""": 32,
"""out_channels""": 32,
}
_lowerCAmelCase =self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =[0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = UNetMidBlockaD # noqa F405
lowerCamelCase = '''mid'''
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={
"""in_channels""": 32,
"""temb_channels""": 1_28,
}
_lowerCAmelCase =self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =[-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase = '''mid'''
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common()
_lowerCAmelCase =32
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =[0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase = '''mid'''
@property
def _lowerCAmelCase ( self ) -> str:
return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common()
_lowerCAmelCase =32
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =[0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = UpBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =[-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = ResnetUpsampleBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =[0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = CrossAttnUpBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> Dict:
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common()
_lowerCAmelCase =32
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =[-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase , include_encoder_hidden_states=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common()
_lowerCAmelCase =32
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =[0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = AttnUpBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =[0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = SkipUpBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> Dict:
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =[-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = AttnSkipUpBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =[0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = UpDecoderBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> Any:
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase ={"""in_channels""": 32, """out_channels""": 32}
_lowerCAmelCase =self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =[0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7]
super().test_output(__UpperCAmelCase )
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = AttnUpDecoderBlockaD # noqa F405
lowerCamelCase = '''up'''
@property
def _lowerCAmelCase ( self ) -> Tuple:
return super().get_dummy_input(include_temb=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase ={"""in_channels""": 32, """out_channels""": 32}
_lowerCAmelCase =self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =[0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8]
super().test_output(__UpperCAmelCase )
| 357 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
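# --- Added illustration (invented field names, reusing the imports above): the copy()
# idiom rebuilds the dataclass from a deep copy of its own __dict__, so mutable fields
# are never shared between the original and the copy.
@dataclass
class _DemoConfig:
    retries: int = 1
    headers: Optional[Dict[str, int]] = None
    def copy(self) -> "_DemoConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
_demo_a = _DemoConfig(headers={"x": 1})
_demo_b = _demo_a.copy()
_demo_b.headers["x"] = 2
assert _demo_a.headers["x"] == 1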
| 341 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , """width_multiplier""" ) )
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase="swish" , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=None , __UpperCAmelCase=0.2_5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , ) -> Optional[Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =make_divisible(5_12 * width_multiplier , divisor=8 )
_lowerCAmelCase =hidden_act
_lowerCAmelCase =conv_kernel_size
_lowerCAmelCase =output_stride
_lowerCAmelCase =classifier_dropout_prob
_lowerCAmelCase =use_labels
_lowerCAmelCase =is_training
_lowerCAmelCase =num_labels
_lowerCAmelCase =initializer_range
_lowerCAmelCase =scope
_lowerCAmelCase =width_multiplier
_lowerCAmelCase =ffn_dropout
_lowerCAmelCase =attn_dropout
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCAmelCase ( self ) -> str:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =MobileViTVaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =MobileViTVaForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =MobileViTVaForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =MobileViTVaModelTester(self )
_lowerCAmelCase =MobileViTVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def _lowerCAmelCase ( self ) -> str:
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def _lowerCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ) -> str:
pass
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> str:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowerCAmelCase =model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_lowerCAmelCase =outputs.hidden_states
_lowerCAmelCase =5
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowerCAmelCase =2
for i in range(len(__UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =MobileViTVaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def _lowerCamelCase() -> Optional[Any]:
_lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> Tuple:
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__UpperCAmelCase )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCAmelCase )
# verify the logits
_lowerCAmelCase =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_lowerCAmelCase =torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
_lowerCAmelCase =model.to(__UpperCAmelCase )
_lowerCAmelCase =MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCAmelCase )
_lowerCAmelCase =outputs.logits
# verify the logits
_lowerCAmelCase =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __UpperCAmelCase )
_lowerCAmelCase =torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
_lowerCAmelCase =model.to(__UpperCAmelCase )
_lowerCAmelCase =MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCAmelCase )
_lowerCAmelCase =outputs.logits.detach().cpu()
_lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(50, 60)] )
_lowerCAmelCase =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
_lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
_lowerCAmelCase =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
| 358 |
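The hidden-states check above walks a divisor upward from 2, halving the expected spatial size of each feature map and finally comparing the model's output stride against the accumulated divisor. A minimal, self-contained sketch of that arithmetic follows; the image size and stage count are illustrative values, not the tester defaults.

# Illustrative sketch (not part of the test suite above): for a backbone whose feature maps
# are halved at every stage, compute the expected spatial size of each hidden state,
# mirroring the divisor loop in the hidden-states test.
def expected_feature_map_sizes(image_size: int, num_stages: int) -> list[int]:
    sizes = []
    divisor = 2
    for _ in range(num_stages):
        sizes.append(image_size // divisor)  # height == width at every stage
        divisor *= 2
    return sizes

if __name__ == "__main__":
    # e.g. a 64x64 input through 5 stages -> [32, 16, 8, 4, 2]
    print(expected_feature_map_sizes(64, 5))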
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowerCamelCase() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 341 | 0 |
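The snippet above implements a two-input OR gate by counting 1s in a tuple of the inputs; as transcribed, both parameters carry the same name, so a directly runnable form needs distinct argument names. Below is a minimal sketch under that assumption (the function and parameter names are illustrative).

# Minimal runnable sketch of the OR gate above, assuming the two inputs were meant to have
# distinct names; the gate returns 1 if at least one input is 1.
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)

def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1

if __name__ == "__main__":
    test_or_gate()
    print(or_gate(0, 1), or_gate(1, 0), or_gate(0, 0), or_gate(1, 1))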
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extend to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the new token to next_input_ids
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 359 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple:
_lowerCAmelCase =compute_bleu(
reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 341 | 0 |
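The metric above delegates to the reference `compute_bleu` implementation, which reports the geometric mean of n-gram precisions together with a brevity penalty. The sketch below shows only how those returned quantities combine into the final score; it is an illustration, not the nmt_bleu implementation, and it omits the clipping and smoothing of n-gram counts.

import math

# Illustrative sketch of how a corpus-level BLEU score is composed from the quantities the
# metric above returns ("precisions", "brevity_penalty", "bleu").
def brevity_penalty(translation_length: int, reference_length: int) -> float:
    if translation_length > reference_length:
        return 1.0
    if translation_length == 0:
        return 0.0
    return math.exp(1 - reference_length / translation_length)

def bleu_from_precisions(precisions: list[float], translation_length: int, reference_length: int) -> float:
    if min(precisions) <= 0:
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / len(precisions))
    return geo_mean * brevity_penalty(translation_length, reference_length)

if __name__ == "__main__":
    # perfect 1- to 4-gram precisions and equal lengths give a BLEU of 1.0,
    # matching the doctest in the metric description above
    print(bleu_from_precisions([1.0, 1.0, 1.0, 1.0], 4, 4))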
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self ) -> int:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def __UpperCAmelCase ( self ) -> str:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self ) -> str:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(__UpperCAmelCase )
@property
def __UpperCAmelCase ( self ) -> Optional[Any]:
def extract(*__UpperCAmelCase , **__UpperCAmelCase ):
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
_lowerCAmelCase =torch.ones([0] )
def __UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
self.pixel_values.to(__UpperCAmelCase )
return self
return Out()
return extract
def __UpperCAmelCase ( self ) -> str:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet
_lowerCAmelCase =PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_lowerCAmelCase =77
_lowerCAmelCase =self.dummy_image.to(__UpperCAmelCase )
_lowerCAmelCase =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =AltDiffusionImgaImgPipeline(
unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCAmelCase )
_lowerCAmelCase =alt_pipe.to(__UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =alt_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__UpperCAmelCase , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =alt_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase =np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __UpperCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet
_lowerCAmelCase =PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
_lowerCAmelCase =77
_lowerCAmelCase =self.dummy_image.to(__UpperCAmelCase )
# put models in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =vae.half()
_lowerCAmelCase =bert.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =AltDiffusionImgaImgPipeline(
unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCAmelCase )
_lowerCAmelCase =alt_pipe.to(__UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =alt_pipe(
[prompt] , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , image=__UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __UpperCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
_lowerCAmelCase =init_image.resize((7_60, 5_04) )
_lowerCAmelCase ="""BAAI/AltDiffusion"""
_lowerCAmelCase =AltDiffusionImgaImgPipeline.from_pretrained(
__UpperCAmelCase , safety_checker=__UpperCAmelCase , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""A fantasy landscape, trending on artstation"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
_lowerCAmelCase =image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
_lowerCAmelCase =np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase =init_image.resize((7_68, 5_12) )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
_lowerCAmelCase ="""BAAI/AltDiffusion"""
_lowerCAmelCase =AltDiffusionImgaImgPipeline.from_pretrained(
__UpperCAmelCase , safety_checker=__UpperCAmelCase , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""A fantasy landscape, trending on artstation"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 360 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 341 | 0 |
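The conversion script above registers a custom string-to-bool parser for `--use_linear_projection` rather than using `type=bool`, because `argparse` would pass the raw string to `bool()` and any non-empty string (including "False") is truthy. A small sketch of that pitfall, reusing the same parser logic and flag name as the script:

import argparse

# Sketch of why the script above defines its own string-to-bool parser instead of type=bool:
# bool("False") is True, so argparse would silently accept "--flag False" as True.
def parse_bool(string: str) -> bool:
    if string == "True":
        return True
    if string == "False":
        return False
    raise ValueError(f"could not parse string as bool {string}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--use_linear_projection", required=False, type=parse_bool)  # mirrors the script above
    print(parser.parse_args(["--use_linear_projection", "False"]).use_linear_projection)  # False, as intended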
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
# Construct model
if gpta_config_file == "":
_lowerCAmelCase =GPTaConfig()
else:
_lowerCAmelCase =GPTaConfig.from_json_file(__UpperCamelCase )
_lowerCAmelCase =GPTaModel(__UpperCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
_lowerCAmelCase =pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_lowerCAmelCase =pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , __UpperCamelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__A = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 361 |
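The conversion script above builds a config, loads the TensorFlow weights, and writes the standard weights and config files into the dump folder, which makes the result a regular Transformers checkpoint directory. A short hedged sketch of reloading it; the folder path below is a hypothetical example, not one produced by this document.

from transformers import GPT2Model

# Reload the converted checkpoint with the standard from_pretrained API.
# "./gpt2-converted" is a hypothetical dump folder created by the script above.
model = GPT2Model.from_pretrained("./gpt2-converted")
print(model.config.n_layer, model.config.n_embd)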
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
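The `__init__` module above declares an `_import_structure` dict and hands it to `_LazyModule`, so the heavy torch and speech imports only happen when one of those names is actually accessed. The sketch below illustrates the same deferred-import idea using a module-level `__getattr__` (PEP 562); it is an illustration of the pattern, not the transformers `_LazyModule` implementation, and it uses the standard-library `json` module as a stand-in for a heavy submodule.

import importlib
import sys

# Minimal sketch of lazy, attribute-triggered imports in the spirit of the _LazyModule pattern above.
_import_structure = {"json": ["dumps", "loads"]}          # stand-in for heavy submodules
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])  # imported only on first access
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

if __name__ == "__main__":
    # attribute access on the module object triggers the lazy import
    print(sys.modules[__name__].dumps({"lazy": True}))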
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''AutoImageProcessor'''
lowerCamelCase = '''AutoTokenizer'''
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[int]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =self.image_processor
_lowerCAmelCase =False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase )
_lowerCAmelCase =kwargs.pop("""images""" , __UpperCAmelCase )
_lowerCAmelCase =kwargs.pop("""text""" , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
_lowerCAmelCase =args[0]
_lowerCAmelCase =args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_lowerCAmelCase =encodings["""input_ids"""]
return inputs
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@contextmanager
def _lowerCAmelCase ( self ) -> int:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_lowerCAmelCase =True
_lowerCAmelCase =self.tokenizer
yield
_lowerCAmelCase =self.image_processor
_lowerCAmelCase =False
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=None ) -> Tuple:
if added_vocab is None:
_lowerCAmelCase =self.tokenizer.get_added_vocab()
_lowerCAmelCase ={}
while tokens:
_lowerCAmelCase =re.search(r"""<s_(.*?)>""" , __UpperCAmelCase , re.IGNORECASE )
if start_token is None:
break
_lowerCAmelCase =start_token.group(1 )
_lowerCAmelCase =re.search(rf'''</s_{key}>''' , __UpperCAmelCase , re.IGNORECASE )
_lowerCAmelCase =start_token.group()
if end_token is None:
_lowerCAmelCase =tokens.replace(__UpperCAmelCase , """""" )
else:
_lowerCAmelCase =end_token.group()
_lowerCAmelCase =re.escape(__UpperCAmelCase )
_lowerCAmelCase =re.escape(__UpperCAmelCase )
_lowerCAmelCase =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , __UpperCAmelCase , re.IGNORECASE )
if content is not None:
_lowerCAmelCase =content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_lowerCAmelCase =self.tokenajson(__UpperCAmelCase , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase )
if value:
if len(__UpperCAmelCase ) == 1:
_lowerCAmelCase =value[0]
_lowerCAmelCase =value
else: # leaf nodes
_lowerCAmelCase =[]
for leaf in content.split(r"""<sep/>""" ):
_lowerCAmelCase =leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_lowerCAmelCase =leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCAmelCase )
if len(output[key] ) == 1:
_lowerCAmelCase =output[key][0]
_lowerCAmelCase =tokens[tokens.find(__UpperCAmelCase ) + len(__UpperCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase )
if len(__UpperCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _lowerCAmelCase ( self ) -> str:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCAmelCase , )
return self.image_processor_class
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCAmelCase , )
return self.image_processor
| 362 |
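The processor's token-to-JSON method above walks `<s_key> ... </s_key>` spans, turning them into dict keys, recursing for nested spans and splitting leaf contents on `<sep/>`. The sketch below only illustrates the flat (non-recursive) case so the expected input and output are visible; the tag sequence is hypothetical example data, and this is not a substitute for the method above.

import re

# Illustrative sketch of the tag-to-JSON conversion performed above: <s_key> ... </s_key>
# spans become dict keys and <sep/> separates multiple leaf values.
# This flat sketch ignores the nested, recursive case handled by the processor.
def parse_leaf_sequence(tokens: str) -> dict:
    result = {}
    for match in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", tokens):
        key, content = match.group(1), match.group(2).strip()
        values = [leaf.strip() for leaf in content.split("<sep/>")]
        result[key] = values[0] if len(values) == 1 else values
    return result

if __name__ == "__main__":
    print(parse_leaf_sequence("<s_name>latte</s_name><s_price>3.00<sep/>4.00</s_price>"))
    # -> {'name': 'latte', 'price': ['3.00', '4.00']}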
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =str(id_ )
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =[]
_lowerCAmelCase ={} # {vertex:distance}
def __lt__( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.key < other.key
def __repr__( self ) -> Any:
return self.id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
self.neighbors.append(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_lowerCAmelCase =weight
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , __UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> list:
_lowerCAmelCase =[]
for u in graph:
_lowerCAmelCase =math.inf
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =graph[:]
while q:
_lowerCAmelCase =min(__UpperCamelCase )
q.remove(__UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowerCAmelCase =u
_lowerCAmelCase =u.edges[v.id]
for i in range(1 , len(__UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Iterator[tuple]:
for u in graph:
_lowerCAmelCase =math.inf
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =list(__UpperCamelCase )
hq.heapify(__UpperCamelCase )
while h:
_lowerCAmelCase =hq.heappop(__UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowerCAmelCase =u
_lowerCAmelCase =u.edges[v.id]
hq.heapify(__UpperCamelCase )
for i in range(1 , len(__UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowerCamelCase() -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
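The two traversal functions above are the list-scan and heap-based variants of Prim's minimum-spanning-tree algorithm over the Vertex class. The following is a compact re-statement of the heap-based variant on a plain adjacency dict, included only as an illustration of the algorithm; it is not a drop-in replacement for the Vertex-based code above.

import heapq

# Compact illustration of heap-based Prim's MST on an adjacency dict;
# returns the MST edges as (parent, child) pairs starting from `start`.
def prim_mst(graph: dict[int, dict[int, int]], start: int) -> list[tuple[int, int]]:
    visited = {start}
    edges = []
    heap = [(w, start, v) for v, w in graph[start].items()]
    heapq.heapify(heap)
    while heap and len(visited) < len(graph):
        weight, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        edges.append((u, v))
        for nxt, w in graph[v].items():
            if nxt not in visited:
                heapq.heappush(heap, (w, v, nxt))
    return edges

if __name__ == "__main__":
    # 4-vertex example: the MST keeps the three cheapest connecting edges
    g = {1: {2: 1, 3: 4}, 2: {1: 1, 3: 2, 4: 6}, 3: {1: 4, 2: 2, 4: 3}, 4: {2: 6, 3: 3}}
    print(prim_mst(g, 1))  # [(1, 2), (2, 3), (3, 4)]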
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict:
_lowerCAmelCase ={doc: key_lines}
_lowerCAmelCase ={doc: sys_lines}
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_lowerCAmelCase =(conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
_lowerCAmelCase =False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase =line.split()[5]
if not parse_col == "-":
_lowerCAmelCase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =[
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase =evaluate(
key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , )
return score
| 341 | 0 |
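The evaluation loop above accumulates the F1 values of MUC, B-cubed and CEAFe and reports their average, scaled to 0-100, as the CoNLL score. A one-function sketch of that final aggregation; the F1 values in the example are made-up numbers, not outputs of the metric.

# Sketch of the final aggregation performed above: the CoNLL score is the mean of the
# MUC, B-cubed and CEAFe F1 values, reported on a 0-100 scale.
def conll_score(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100

if __name__ == "__main__":
    print(f"{conll_score(0.80, 0.70, 0.60):.2f}")  # 70.00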
"""simple docstring"""
import math
import sys
def _lowerCamelCase(__UpperCamelCase ) -> str:
_lowerCAmelCase =""""""
try:
with open(__UpperCamelCase , """rb""" ) as binary_file:
_lowerCAmelCase =binary_file.read()
for dat in data:
_lowerCAmelCase =F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def _lowerCamelCase(__UpperCamelCase ) -> str:
_lowerCAmelCase ={"""0""": """0""", """1""": """1"""}
_lowerCAmelCase , _lowerCAmelCase ="""""", """"""
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_lowerCAmelCase =lexicon[curr_string]
result += last_match_id
_lowerCAmelCase =last_match_id + """0"""
if math.loga(__UpperCamelCase ).is_integer():
_lowerCAmelCase ={}
for curr_key in list(__UpperCamelCase ):
_lowerCAmelCase =lexicon.pop(__UpperCamelCase )
_lowerCAmelCase =new_lex
_lowerCAmelCase =last_match_id + """1"""
index += 1
_lowerCAmelCase =""""""
return result
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> None:
_lowerCAmelCase =8
try:
with open(__UpperCamelCase , """wb""" ) as opened_file:
_lowerCAmelCase =[
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__UpperCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def _lowerCamelCase(__UpperCamelCase ) -> str:
_lowerCAmelCase =0
for letter in data_bits:
if letter == "1":
break
counter += 1
_lowerCAmelCase =data_bits[counter:]
_lowerCAmelCase =data_bits[counter + 1 :]
return data_bits
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> None:
_lowerCAmelCase =read_file_binary(__UpperCamelCase )
_lowerCAmelCase =remove_prefix(__UpperCamelCase )
_lowerCAmelCase =decompress_data(__UpperCamelCase )
write_file_binary(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 364 |
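The decompressor above reads the whole file as one bit string (each byte rendered as a fixed-width 8-character binary string), strips the prefix up to the first "1", decodes with a growing lexicon, and packs 8-bit groups back into bytes on output. The sketch below isolates just the byte/bit-string round trip used at both ends, as a small standalone illustration.

# Round-trip sketch of the byte <-> bit-string conversion used by the read/write helpers above:
# every byte becomes a fixed-width 8-character binary string, and 8-character groups are
# packed back into bytes on the way out.
def bytes_to_bits(data: bytes) -> str:
    return "".join(f"{byte:08b}" for byte in data)

def bits_to_bytes(bits: str) -> bytes:
    return bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8))

if __name__ == "__main__":
    original = b"LZW"
    assert bits_to_bytes(bytes_to_bits(original)) == original
    print(bytes_to_bits(original))  # 010011000101101001010111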
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = XGLMConfig
lowerCamelCase = {}
lowerCamelCase = '''gelu'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =ffn_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Dict:
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase ( self ) -> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =TFXGLMModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def _lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
_lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =(
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
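        # pad on the left so batched generation can be compared against the unbatched runs below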
_lowerCAmelCase ="""left"""
# use different length sentences to test batching
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase )
_lowerCAmelCase =inputs["""input_ids"""]
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
| 341 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , """num_attention_heads""" ) )
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=16 , __UpperCAmelCase=[1_28, 2_56, 3_84] , __UpperCAmelCase=[4, 6, 8] , __UpperCAmelCase=[2, 3, 4] , __UpperCAmelCase=[16, 16, 16] , __UpperCAmelCase=0 , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) -> List[Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =kernel_size
_lowerCAmelCase =stride
_lowerCAmelCase =padding
_lowerCAmelCase =hidden_sizes
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =depths
_lowerCAmelCase =key_dim
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =patch_size
_lowerCAmelCase =attention_ratio
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =initializer_range
_lowerCAmelCase =[
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =num_labels
_lowerCAmelCase =initializer_range
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ) -> str:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
_lowerCAmelCase =LevitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =(self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase =image_size[0], image_size[1]
for _ in range(4 ):
_lowerCAmelCase =floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
_lowerCAmelCase =floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =LevitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =LevitModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def _lowerCAmelCase ( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self ) -> List[str]:
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def _lowerCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def _lowerCAmelCase ( self ) -> str:
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
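            # run a forward pass and check the number of hidden states and the shape of the first one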
_lowerCAmelCase =model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_lowerCAmelCase =outputs.hidden_states
_lowerCAmelCase =len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
_lowerCAmelCase =(self.model_tester.image_size, self.model_tester.image_size)
_lowerCAmelCase , _lowerCAmelCase =image_size[0], image_size[1]
for _ in range(4 ):
_lowerCAmelCase =floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
_lowerCAmelCase =floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ) -> str:
pass
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Any:
if not self.model_tester.is_training:
return
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_lowerCAmelCase =model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
_lowerCAmelCase =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
_lowerCAmelCase =model(**__UpperCAmelCase ).loss
loss.backward()
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCAmelCase =False
_lowerCAmelCase =True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_lowerCAmelCase =model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
_lowerCAmelCase =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
_lowerCAmelCase =model(**__UpperCAmelCase ).loss
loss.backward()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =[
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ):
_lowerCAmelCase =problem_type["""title"""]
_lowerCAmelCase =problem_type["""num_labels"""]
_lowerCAmelCase =model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
_lowerCAmelCase =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if problem_type["num_labels"] > 1:
_lowerCAmelCase =inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
_lowerCAmelCase =inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
_lowerCAmelCase =model(**__UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def _lowerCAmelCase ( self ) -> str:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =LevitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def _lowerCamelCase() -> Dict:
_lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> Optional[Any]:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCAmelCase )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCAmelCase )
# verify the logits
_lowerCAmelCase =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_lowerCAmelCase =torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 365 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__A = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
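            # SentencePiece keeps a trailing comma glued to digits; split pieces like "9," so the comma becomes its own token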
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 341 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__A = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 366 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase ) -> bool:
_lowerCAmelCase =str(__UpperCamelCase )
return n == n[::-1]
def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str:
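    # sum the numbers below the limit that are palindromic in both base 10 and base 2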
_lowerCAmelCase =0
for i in range(1 , __UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 341 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = XLMProphetNetTokenizer
lowerCamelCase = False
lowerCamelCase = True
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase =XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""[PAD]"""
_lowerCAmelCase =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__UpperCAmelCase ) , 10_12 )
def _lowerCAmelCase ( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_lowerCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def _lowerCAmelCase ( self ) -> int:
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase ="""Hello World!"""
_lowerCAmelCase =[3_53_89, 66_72, 49, 2]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def _lowerCAmelCase ( self ) -> Any:
# fmt: off
_lowerCAmelCase ={"""input_ids""": [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 367 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''llama'''
lowerCamelCase = ['''past_key_values''']
def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_key_value_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =initializer_range
_lowerCAmelCase =rms_norm_eps
_lowerCAmelCase =pretraining_tp
_lowerCAmelCase =use_cache
_lowerCAmelCase =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> str:
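        # validate the optional `rope_scaling` dict: it must hold a supported "type" and a "factor" greater than 1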
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 341 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
def _lowerCamelCase(__UpperCamelCase ) -> List[List[ImageInput]]:
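    # coerce the input into a batch of videos, i.e. a list of videos where each video is a list of frames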
if isinstance(__UpperCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__UpperCamelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__UpperCamelCase ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''pixel_values''']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 2_55 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =size if size is not None else {"""shortest_edge""": 2_24}
_lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_lowerCAmelCase =crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
_lowerCAmelCase =get_size_dict(__UpperCAmelCase , param_name="""crop_size""" )
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =crop_size
_lowerCAmelCase =resample
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase =image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
if "shortest_edge" in size:
_lowerCAmelCase =get_resize_output_image_size(__UpperCAmelCase , size["""shortest_edge"""] , default_to_square=__UpperCAmelCase )
elif "height" in size and "width" in size:
_lowerCAmelCase =(size["""height"""], size["""width"""])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]:
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray:
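        # preprocess a single frame: optional resize, center crop, rescale and normalize, then fix the channel layout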
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase =to_numpy_array(__UpperCAmelCase )
if do_resize:
_lowerCAmelCase =self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase )
if do_center_crop:
_lowerCAmelCase =self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase )
if do_rescale:
_lowerCAmelCase =self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase )
if do_normalize:
_lowerCAmelCase =self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase )
_lowerCAmelCase =to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase )
return image
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase =image_std if image_std is not None else self.image_std
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
_lowerCAmelCase =crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase =get_size_dict(__UpperCAmelCase , param_name="""crop_size""" )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
_lowerCAmelCase =make_batched(__UpperCAmelCase )
_lowerCAmelCase =[
[
self._preprocess_image(
image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , )
for img in video
]
for video in videos
]
_lowerCAmelCase ={"""pixel_values""": videos}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 368 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
| 341 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=False ) -> int:
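    # read a boolean flag from the environment, falling back to `default` when the variable is unset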
try:
_lowerCAmelCase =os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_lowerCAmelCase =default
else:
# KEY is set, convert it to True or False.
try:
_lowerCAmelCase =strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
__A = parse_flag_from_env('RUN_SLOW', default=False)
def _lowerCamelCase(__UpperCamelCase ) -> Any:
return unittest.skip("""Test was skipped""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Dict:
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]:
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> str:
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Dict:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Dict:
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> str:
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]:
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[Any]:
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version(""">=""" , __UpperCamelCase ) , F'''test requires torch version >= {version}''' )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Dict:
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]:
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(__UpperCamelCase )
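# True when at least one tracker (wandb or tensorboard) is installed and comet_ml is not.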
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(__UpperCamelCase )
class TempDirTestCase(unittest.TestCase ):
    '''Creates a temporary directory shared by the whole class and empties it before each test.'''
    clear_on_setup = True
    @classmethod
    def setUpClass(cls ):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp(self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("""**/*""" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase ):
    '''Resets the AcceleratorState/PartialState singletons after every test.'''
    def tearDown(self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    '''Registers mocks so they are started for the test and stopped automatically on cleanup.'''
    def add_mocks(self , mocks ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor ) -> bool:
    # Gather the tensor from every process and check that all copies match the local one.
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    '''Holds the return code and the captured stdout/stderr lines of a finished subprocess.'''
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print("""\nRunning: """ , """ """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label="""stdout:""" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label="""stderr:""" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class SubprocessCallException(Exception ):
    pass
def run_command(command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , """decode""" ):
                output = output.decode("""utf-8""" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'''Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
| 369 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
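# Standalone tests for the TrOCR decoder used as a causal language model, without the vision encoder.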
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
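    # Scrapes the og:image meta tag from the given page and saves the referenced image to disk.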
    url = input('Enter image url: ').strip()
    print(F"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(F"""Done. Image saved to disk as {file_name}.""")
| 370 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = JukeboxTokenizer
lowerCamelCase = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
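    # Tokenizing the metadata above must reproduce the hard-coded token id tensors in the tests below.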
@require_torch
def _lowerCAmelCase ( self ) -> str:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 341 | 0 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__A = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__A = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__A = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
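# Thin datasets.Metric wrapper around scipy.stats.spearmanr; optionally also returns the p-value.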
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =spearmanr(__UpperCAmelCase , __UpperCAmelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 371 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
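# SentencePiece-based tokenizer that remaps the full sentencepiece model onto a reduced monolingual vocabulary loaded from dict.txt.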
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 341 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
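# Exercises both the slow (sentencepiece) and fast (tokenizers) Pegasus tokenizers against the same fixture model.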
@require_sentencepiece
@require_tokenizers
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Any = PegasusTokenizer
lowerCamelCase__: Optional[int] = PegasusTokenizerFast
lowerCamelCase__: Optional[Any] = True
lowerCamelCase__: Dict = True
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Any = PegasusTokenizer(__lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Dict ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def _lowerCamelCase ( self: List[str] , **__lowerCamelCase: Tuple ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] ) -> List[str]:
return ("This is a test", "This is a test")
def _lowerCamelCase ( self: List[str] ) -> str:
__UpperCAmelCase : Dict = "</s>"
__UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] ) -> Any:
__UpperCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(__lowerCamelCase ) , 11_03 )
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : int = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase ).input_ids[0]
__UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase ).input_ids[0]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : List[str] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCAmelCase : Tuple = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__UpperCAmelCase : Optional[int] = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__UpperCAmelCase : Optional[int] = tokenizer([raw_input_str] , return_tensors=__lowerCamelCase ).input_ids[0]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : List[str] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
__UpperCAmelCase : Any = "To ensure a smooth flow of bank resolutions."
__UpperCAmelCase : str = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__UpperCAmelCase : Tuple = tokenizer([raw_input_str] , return_tensors=__lowerCamelCase ).input_ids[0]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowerCamelCase ( self: int ) -> Dict:
__UpperCAmelCase : Tuple = ["This is going to be way too long." * 1_50, "short example"]
__UpperCAmelCase : Any = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : Dict = self._large_tokenizer(__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" )
__UpperCAmelCase : Any = self._large_tokenizer(
text_target=__lowerCamelCase , max_length=5 , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCamelCase ) == 2 # input_ids, attention_mask.
@slow
def _lowerCamelCase ( self: int ) -> List[str]:
# fmt: off
__UpperCAmelCase : str = {"input_ids": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = PegasusTokenizer
lowerCamelCase__: Union[str, Any] = PegasusTokenizerFast
lowerCamelCase__: List[Any] = True
lowerCamelCase__: Optional[Any] = True
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Optional[Any] = PegasusTokenizer(__lowerCamelCase , offset=0 , mask_token_sent=__lowerCamelCase , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def _lowerCamelCase ( self: Union[str, Any] , **__lowerCamelCase: Union[str, Any] ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Optional[Any] ) -> Optional[Any]:
return ("This is a test", "This is a test")
def _lowerCamelCase ( self: Tuple ) -> str:
__UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : str = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
__UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase ).input_ids[0]
__UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase ).input_ids[0]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Tuple = ["This is going to be way too long." * 10_00, "short example"]
__UpperCAmelCase : Union[str, Any] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : Tuple = self._large_tokenizer(__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" )
__UpperCAmelCase : int = self._large_tokenizer(
text_target=__lowerCamelCase , max_length=5 , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCamelCase ) == 2 # input_ids, attention_mask.
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[int] = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
__UpperCAmelCase : Any = self._large_tokenizer(__lowerCamelCase ).input_ids
self.assertListEqual(
__lowerCamelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 342 | import math
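# Expected number of distinct colours when 20 balls are drawn from an urn of 70 (10 balls of each of 7 colours).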
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(taken: int = 20 ) -> str:
    total = math.comb(NUM_BALLS , taken )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , taken )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''
if __name__ == "__main__":
    print(solution(20))
| 342 | 1 |
from __future__ import annotations
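# Simple interest: principal * rate * periods.  Compound interest: principal * ((1 + rate) ** periods - 1).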
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float ) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0" )
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float, ) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0" )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float, ) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0" )
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | def longest_distance(graph ) -> None:
    # Kahn's topological order: repeatedly remove zero-indegree vertices, relaxing the longest path to each successor.
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 342 | 1 |
from math import log
from scipy.constants import Boltzmann, physical_constants
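# Built-in potential of a p-n junction: V_bi = (k*T / q) * ln(N_d * N_a / n_i**2), returned in volts.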
T = 300 # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float, ) -> float:
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
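# Lazy import structure: the submodules listed below are only imported when an attribute is first accessed.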
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 1 |
from __future__ import annotations
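# Symmetric XOR cipher: encrypting and decrypting are the same operation, XOR-ing each character with an integer key.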
class XORCipher:
    def __init__( self, key: int = 0 ) -> None:
        self.__key = key
    def encrypt( self, content: str, key: int ) -> list[str]:
        assert isinstance(key, int ) and isinstance(content, str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_55
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self, content: list[str], key: int ) -> list[str]:
        assert isinstance(key, int ) and isinstance(content, list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_55
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self, content: str, key: int = 0 ) -> str:
        assert isinstance(key, int ) and isinstance(content, str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_55:
            key -= 2_55
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self, content: str, key: int = 0 ) -> str:
        assert isinstance(key, int ) and isinstance(content, str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 2_55:
            key -= 2_55
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self, file: str, key: int = 0 ) -> bool:
        assert isinstance(file, str ) and isinstance(key, int )
        try:
            with open(file ) as fin, open("encrypt.out", "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self, file: str, key: int ) -> bool:
        assert isinstance(file, str ) and isinstance(key, int )
        try:
            with open(file ) as fin, open("decrypt.out", "w+" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 342 | from __future__ import annotations
from math import pi
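# Inductive reactance: X_L = 2 * pi * f * L; whichever of the three quantities is passed as 0 is solved for.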
def ind_reactance(inductance: float, frequency: float, reactance: float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
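# Builds an ASTConfig whose settings and id/label mapping depend on the checkpoint name.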
def _UpperCamelCase ( snake_case__ ) -> List[str]:
__UpperCAmelCase : List[str] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__UpperCAmelCase : Optional[int] = 128
elif "12-12" in model_name:
__UpperCAmelCase : int = 12
__UpperCAmelCase : int = 12
elif "14-14" in model_name:
__UpperCAmelCase : Dict = 14
__UpperCAmelCase : Dict = 14
elif "16-16" in model_name:
__UpperCAmelCase : Optional[Any] = 16
__UpperCAmelCase : List[str] = 16
else:
raise ValueError("Model not supported" )
__UpperCAmelCase : Tuple = "huggingface/label-files"
if "speech-commands" in model_name:
__UpperCAmelCase : List[Any] = 35
__UpperCAmelCase : Optional[int] = "speech-commands-v2-id2label.json"
else:
__UpperCAmelCase : Tuple = 527
__UpperCAmelCase : str = "audioset-id2label.json"
__UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Any = idalabel
__UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( snake_case__ ) -> Dict:
if "module.v" in name:
__UpperCAmelCase : Tuple = name.replace("module.v", "audio_spectrogram_transformer" )
if "cls_token" in name:
__UpperCAmelCase : Optional[int] = name.replace("cls_token", "embeddings.cls_token" )
if "dist_token" in name:
__UpperCAmelCase : Any = name.replace("dist_token", "embeddings.distillation_token" )
if "pos_embed" in name:
__UpperCAmelCase : Any = name.replace("pos_embed", "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__UpperCAmelCase : int = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
__UpperCAmelCase : Tuple = name.replace("blocks", "encoder.layer" )
if "attn.proj" in name:
__UpperCAmelCase : Dict = name.replace("attn.proj", "attention.output.dense" )
if "attn" in name:
__UpperCAmelCase : int = name.replace("attn", "attention.self" )
if "norm1" in name:
__UpperCAmelCase : Any = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
__UpperCAmelCase : Dict = name.replace("norm2", "layernorm_after" )
if "mlp.fc1" in name:
__UpperCAmelCase : int = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
__UpperCAmelCase : Any = name.replace("mlp.fc2", "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__UpperCAmelCase : str = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
__UpperCAmelCase : Tuple = name.replace("module.mlp_head.0", "classifier.layernorm" )
if "module.mlp_head.1" in name:
__UpperCAmelCase : List[str] = name.replace("module.mlp_head.1", "classifier.dense" )
return name
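# The original checkpoints store query/key/value as a single fused "qkv" projection; it is split into separate matrices here.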
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Any = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__UpperCAmelCase : Any = key.split("." )
__UpperCAmelCase : Dict = int(key_split[3] )
__UpperCAmelCase : str = config.hidden_size
if "weight" in key:
__UpperCAmelCase : Tuple = val[:dim, :]
__UpperCAmelCase : int = val[dim : dim * 2, :]
__UpperCAmelCase : Optional[Any] = val[-dim:, :]
else:
__UpperCAmelCase : Optional[int] = val[:dim]
__UpperCAmelCase : int = val[dim : dim * 2]
__UpperCAmelCase : Any = val[-dim:]
else:
__UpperCAmelCase : List[Any] = val
return orig_state_dict
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[Any] = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(snake_case__, snake_case__ )
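# End-to-end conversion: download the original checkpoint, remap its keys, and verify the converted model's logits on a sample input.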
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Optional[int]:
__UpperCAmelCase : Optional[Any] = get_audio_spectrogram_transformer_config(snake_case__ )
__UpperCAmelCase : Optional[int] = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
__UpperCAmelCase : str = model_name_to_url[model_name]
__UpperCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )
# remove some keys
remove_keys(snake_case__ )
# rename some keys
__UpperCAmelCase : Optional[Any] = convert_state_dict(snake_case__, snake_case__ )
# load 🤗 model
__UpperCAmelCase : Union[str, Any] = ASTForAudioClassification(snake_case__ )
model.eval()
model.load_state_dict(snake_case__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__UpperCAmelCase : List[Any] = -4.267_7393 if "speech-commands" not in model_name else -6.84_5978
__UpperCAmelCase : Optional[Any] = 4.568_9974 if "speech-commands" not in model_name else 5.565_4526
__UpperCAmelCase : Optional[Any] = 1024 if "speech-commands" not in model_name else 128
__UpperCAmelCase : int = ASTFeatureExtractor(mean=snake_case__, std=snake_case__, max_length=snake_case__ )
if "speech-commands" in model_name:
__UpperCAmelCase : str = load_dataset("speech_commands", "v0.02", split="validation" )
__UpperCAmelCase : List[str] = dataset[0]["audio"]["array"]
else:
__UpperCAmelCase : Dict = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
__UpperCAmelCase , __UpperCAmelCase : List[str] = torchaudio.load(snake_case__ )
__UpperCAmelCase : Dict = waveform.squeeze().numpy()
__UpperCAmelCase : Tuple = feature_extractor(snake_case__, sampling_rate=1_6000, return_tensors="pt" )
# forward pass
__UpperCAmelCase : Tuple = model(**snake_case__ )
__UpperCAmelCase : Tuple = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__UpperCAmelCase : List[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__UpperCAmelCase : Optional[Any] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__UpperCAmelCase : Any = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__UpperCAmelCase : Any = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__UpperCAmelCase : List[Any] = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__UpperCAmelCase : Union[str, Any] = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3], snake_case__, atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(snake_case__ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(f'''MIT/{model_name}''' )
feature_extractor.push_to_hub(f'''MIT/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
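# Hedged standalone sketch, not part of the original module: the upsample block above
# doubles the spatial dimensions with nearest-neighbour resizing before its 3x3
# convolution. The shapes below are illustrative assumptions.
def _example_nearest_upsample():
    import jax
    import jax.numpy as jnp

    batch, height, width, channels = 1, 8, 8, 4
    hidden_states = jnp.ones((batch, height, width, channels))
    upsampled = jax.image.resize(
        hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
    )
    assert upsampled.shape == (1, 16, 16, 4)
    return upsampled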
| 342 | 1 |
import math
def _UpperCamelCase ( snake_case__ ) -> bool:
assert isinstance(snake_case__, snake_case__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__UpperCAmelCase : Optional[Any] = range(3, int(math.sqrt(snake_case__ ) + 1 ), 2 )
return not any(not number % i for i in odd_numbers )
def _UpperCamelCase ( snake_case__, snake_case__=1, **snake_case__ ) -> List[Any]:
__UpperCAmelCase : List[str] = factor * value
__UpperCAmelCase : Dict = value
while not is_prime(snake_case__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1, **snake_case__ )
return value
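# Minimal self-contained sketch with readable names (assumed; the two obfuscated helpers
# above shadow each other in this dump): trial division only needs to test odd divisors
# up to the square root of the candidate.
def _example_is_prime(number: int) -> bool:
    import math

    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0:
        return False
    return not any(number % i == 0 for i in range(3, int(math.sqrt(number)) + 1, 2))
# e.g. _example_is_prime(13) is True and _example_is_prime(15) is False.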
| 342 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
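# Hedged standalone sketch of the raw faiss calls the wrapper above builds on (not used
# by the tests): a flat inner-product index over the identity basis returns the matching
# row as the top hit for a one-hot query.
def _example_raw_faiss_search():
    import faiss
    import numpy as np

    index = faiss.IndexFlatIP(5)
    index.add(np.eye(5, dtype=np.float32))
    query = np.zeros((1, 5), dtype=np.float32)
    query[0, 1] = 1.0
    scores, indices = index.search(query, 1)
    assert indices[0, 0] == 1
    return scores, indices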
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
| 342 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_snake_case = '''src/transformers'''
_snake_case = '''docs/source/en'''
_snake_case = '''.'''
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> List[str]:
with open(snake_case__, "r", encoding="utf-8", newline="\n" ) as f:
__UpperCAmelCase : List[Any] = f.readlines()
# Find the start prompt.
__UpperCAmelCase : Optional[int] = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
__UpperCAmelCase : List[str] = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_snake_case = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_snake_case = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_snake_case = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_snake_case = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(TRANSFORMERS_PATH)
def _UpperCamelCase ( snake_case__ ) -> List[str]:
__UpperCAmelCase : List[Any] = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", snake_case__ )
return [m.group(0 ) for m in matches]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = 2 if text == "✅" or text == "❌" else len(snake_case__ )
__UpperCAmelCase : Union[str, Any] = (width - text_length) // 2
__UpperCAmelCase : List[Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _UpperCamelCase ( ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase : Dict = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__UpperCAmelCase : Tuple = {name: config.replace("Config", "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__UpperCAmelCase : List[Any] = collections.defaultdict(snake_case__ )
__UpperCAmelCase : List[str] = collections.defaultdict(snake_case__ )
__UpperCAmelCase : Tuple = collections.defaultdict(snake_case__ )
__UpperCAmelCase : int = collections.defaultdict(snake_case__ )
__UpperCAmelCase : Union[str, Any] = collections.defaultdict(snake_case__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(snake_case__ ):
__UpperCAmelCase : Optional[Any] = None
if attr_name.endswith("Tokenizer" ):
__UpperCAmelCase : Optional[Any] = slow_tokenizers
__UpperCAmelCase : Optional[Any] = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
__UpperCAmelCase : Tuple = fast_tokenizers
__UpperCAmelCase : Optional[Any] = attr_name[:-13]
elif _re_tf_models.match(snake_case__ ) is not None:
__UpperCAmelCase : Dict = tf_models
__UpperCAmelCase : str = _re_tf_models.match(snake_case__ ).groups()[0]
elif _re_flax_models.match(snake_case__ ) is not None:
__UpperCAmelCase : Union[str, Any] = flax_models
__UpperCAmelCase : Any = _re_flax_models.match(snake_case__ ).groups()[0]
elif _re_pt_models.match(snake_case__ ) is not None:
__UpperCAmelCase : List[Any] = pt_models
__UpperCAmelCase : Optional[Any] = _re_pt_models.match(snake_case__ ).groups()[0]
if lookup_dict is not None:
while len(snake_case__ ) > 0:
if attr_name in model_name_to_prefix.values():
__UpperCAmelCase : Dict = True
break
# Try again after removing the last word in the name
__UpperCAmelCase : List[Any] = "".join(camel_case_split(snake_case__ )[:-1] )
# Let's build that table!
__UpperCAmelCase : Dict = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__UpperCAmelCase : Tuple = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
__UpperCAmelCase : Dict = [len(snake_case__ ) + 2 for c in columns]
__UpperCAmelCase : List[str] = max([len(snake_case__ ) for name in model_names] ) + 2
# Build the table per se
__UpperCAmelCase : int = "|" + "|".join([_center_text(snake_case__, snake_case__ ) for c, w in zip(snake_case__, snake_case__ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
__UpperCAmelCase : str = {True: "✅", False: "❌"}
for name in model_names:
__UpperCAmelCase : Dict = model_name_to_prefix[name]
__UpperCAmelCase : Optional[int] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(snake_case__, snake_case__ ) for l, w in zip(snake_case__, snake_case__ )] ) + "|\n"
return table
def _UpperCamelCase ( snake_case__=False ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = _find_text_in_file(
filename=os.path.join(snake_case__, "index.md" ), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
__UpperCAmelCase : int = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(snake_case__, "index.md" ), "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_snake_case = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 342 |
import argparse
import struct
import unittest
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
# Initialize hash values
__UpperCAmelCase : Any = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
__UpperCAmelCase : Dict = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def _lowerCamelCase ( self: Dict ) -> None:
# Convert into blocks of 64 bytes
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
__UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.sha256(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
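# Hedged standalone illustration of the preprocessing step above: SHA-256 padding appends
# a 0x80 byte, enough zero bytes, and the original bit length as an 8-byte big-endian
# integer, so the padded message is always a whole number of 64-byte blocks.
def _example_sha256_padding(data: bytes = b"abc") -> bytes:
    import struct

    padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
    padded = data + padding + struct.pack(">Q", len(data) * 8)
    assert len(padded) % 64 == 0
    return padded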
| 342 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_snake_case = logging.get_logger(__name__)
# General docstring
_snake_case = '''RegNetConfig'''
# Base docstring
_snake_case = '''facebook/regnet-y-040'''
_snake_case = [1, 1088, 7, 7]
# Image classification docstring
_snake_case = '''facebook/regnet-y-040'''
_snake_case = '''tabby, tabby cat'''
_snake_case = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: int = 3 , __lowerCamelCase: int = 1 , __lowerCamelCase: int = 1 , __lowerCamelCase: Optional[str] = "relu" , **__lowerCamelCase: Any , ) -> Dict:
super().__init__(**__lowerCamelCase )
# The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCAmelCase : Union[str, Any] = tf.keras.layers.ConvaD(
filters=__lowerCamelCase , kernel_size=__lowerCamelCase , strides=__lowerCamelCase , padding="VALID" , groups=__lowerCamelCase , use_bias=__lowerCamelCase , name="convolution" , )
__UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
__UpperCAmelCase : List[Any] = ACTaFN[activation] if activation is not None else tf.identity
def _lowerCamelCase ( self: Any , __lowerCamelCase: List[str] ) -> Union[str, Any]:
__UpperCAmelCase : int = self.convolution(self.padding(__lowerCamelCase ) )
__UpperCAmelCase : str = self.normalization(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = self.activation(__lowerCamelCase )
return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self: str , __lowerCamelCase: RegNetConfig , **__lowerCamelCase: Union[str, Any] ) -> Union[str, Any]:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : Tuple = config.num_channels
__UpperCAmelCase : List[Any] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Dict ) -> int:
__UpperCAmelCase : str = shape_list(__lowerCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCAmelCase : Dict = tf.transpose(__lowerCamelCase , perm=(0, 2, 3, 1) )
__UpperCAmelCase : Optional[int] = self.embedder(__lowerCamelCase )
return hidden_state
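# Hedged standalone sketch, not part of the model: the embedder above transposes pixel
# values from NCHW to NHWC because tf.keras Conv2D on CPU only supports channels-last.
# The shapes below are illustrative assumptions.
def _example_nchw_to_nhwc():
    import tensorflow as tf

    pixel_values = tf.zeros((2, 3, 224, 224))  # (batch, channels, height, width)
    channels_last = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
    assert channels_last.shape == (2, 224, 224, 3)
    return channels_last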
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self: Dict , __lowerCamelCase: int , __lowerCamelCase: int = 2 , **__lowerCamelCase: List[str] ) -> Dict:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : Any = tf.keras.layers.ConvaD(
filters=__lowerCamelCase , kernel_size=1 , strides=__lowerCamelCase , use_bias=__lowerCamelCase , name="convolution" )
__UpperCAmelCase : Union[str, Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: tf.Tensor , __lowerCamelCase: bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(__lowerCamelCase ) , training=__lowerCamelCase )
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self: int , __lowerCamelCase: int , __lowerCamelCase: int , **__lowerCamelCase: int ) -> Union[str, Any]:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase , name="pooler" )
__UpperCAmelCase : str = [
tf.keras.layers.ConvaD(filters=__lowerCamelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__lowerCamelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def _lowerCamelCase ( self: Any , __lowerCamelCase: str ) -> Tuple:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCAmelCase : Any = self.pooler(__lowerCamelCase )
for layer_module in self.attention:
__UpperCAmelCase : List[str] = layer_module(__lowerCamelCase )
__UpperCAmelCase : List[str] = hidden_state * pooled
return hidden_state
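# Hedged standalone sketch of the squeeze-and-excitation idea used above (layer sizes are
# assumptions): features are globally pooled to (batch, 1, 1, channels), squeezed through
# a reduced bottleneck, and the resulting sigmoid gate rescales the original feature map.
def _example_squeeze_excitation():
    import tensorflow as tf

    features = tf.random.uniform((2, 14, 14, 32))
    pooled = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(features)
    gate = tf.keras.layers.Conv2D(8, kernel_size=1, activation="relu")(pooled)
    gate = tf.keras.layers.Conv2D(32, kernel_size=1, activation="sigmoid")(gate)
    return features * gate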
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self: Union[str, Any] , __lowerCamelCase: RegNetConfig , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int = 1 , **__lowerCamelCase: int ) -> Union[str, Any]:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : Tuple = in_channels != out_channels or stride != 1
__UpperCAmelCase : Dict = max(1 , out_channels // config.groups_width )
__UpperCAmelCase : Union[str, Any] = (
TFRegNetShortCut(__lowerCamelCase , stride=__lowerCamelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCAmelCase : List[str] = [
TFRegNetConvLayer(__lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__lowerCamelCase , stride=__lowerCamelCase , groups=__lowerCamelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__lowerCamelCase , kernel_size=1 , activation=__lowerCamelCase , name="layer.2" ),
]
__UpperCAmelCase : Any = ACTaFN[config.hidden_act]
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Tuple = hidden_state
for layer_module in self.layers:
__UpperCAmelCase : Dict = layer_module(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = self.shortcut(__lowerCamelCase )
hidden_state += residual
__UpperCAmelCase : int = self.activation(__lowerCamelCase )
return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self: str , __lowerCamelCase: RegNetConfig , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int = 1 , **__lowerCamelCase: int ) -> Optional[Any]:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : str = in_channels != out_channels or stride != 1
__UpperCAmelCase : Tuple = max(1 , out_channels // config.groups_width )
__UpperCAmelCase : Dict = (
TFRegNetShortCut(__lowerCamelCase , stride=__lowerCamelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
__UpperCAmelCase : Optional[Any] = [
TFRegNetConvLayer(__lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__lowerCamelCase , stride=__lowerCamelCase , groups=__lowerCamelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__lowerCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__lowerCamelCase , kernel_size=1 , activation=__lowerCamelCase , name="layer.3" ),
]
__UpperCAmelCase : List[Any] = ACTaFN[config.hidden_act]
def _lowerCamelCase ( self: Any , __lowerCamelCase: str ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = hidden_state
for layer_module in self.layers:
__UpperCAmelCase : str = layer_module(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.shortcut(__lowerCamelCase )
hidden_state += residual
__UpperCAmelCase : List[Any] = self.activation(__lowerCamelCase )
return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self: Dict , __lowerCamelCase: RegNetConfig , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: int = 2 , __lowerCamelCase: int = 2 , **__lowerCamelCase: Any ) -> Optional[int]:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__UpperCAmelCase : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , stride=__lowerCamelCase , name="layers.0" ),
*[layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Any ) -> Dict:
for layer_module in self.layers:
__UpperCAmelCase : Union[str, Any] = layer_module(__lowerCamelCase )
return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self: List[str] , __lowerCamelCase: RegNetConfig , **__lowerCamelCase: Optional[Any] ) -> str:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : int = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
__UpperCAmelCase : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowerCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , depth=__lowerCamelCase , name=f'''stages.{i+1}''' ) )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: tf.Tensor , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCAmelCase : str = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCAmelCase : Dict = hidden_states + (hidden_state,)
__UpperCAmelCase : Union[str, Any] = stage_module(__lowerCamelCase )
if output_hidden_states:
__UpperCAmelCase : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase , hidden_states=__lowerCamelCase )
@keras_serializable
class _snake_case ( tf.keras.layers.Layer ):
lowerCamelCase__: int = RegNetConfig
def __init__( self: Dict , __lowerCamelCase: Any , **__lowerCamelCase: Union[str, Any] ) -> Optional[int]:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : Any = config
__UpperCAmelCase : List[Any] = TFRegNetEmbeddings(__lowerCamelCase , name="embedder" )
__UpperCAmelCase : Tuple = TFRegNetEncoder(__lowerCamelCase , name="encoder" )
__UpperCAmelCase : Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase , name="pooler" )
@unpack_inputs
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: tf.Tensor , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCAmelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Optional[int] = self.embedder(__lowerCamelCase , training=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = self.encoder(
__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase , training=__lowerCamelCase )
__UpperCAmelCase : Dict = encoder_outputs[0]
__UpperCAmelCase : Optional[Any] = self.pooler(__lowerCamelCase )
# Change to NCHW output format have uniformity in the modules
__UpperCAmelCase : Optional[int] = tf.transpose(__lowerCamelCase , perm=(0, 3, 1, 2) )
__UpperCAmelCase : int = tf.transpose(__lowerCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCAmelCase : Dict = tuple([tf.transpose(__lowerCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCamelCase , pooler_output=__lowerCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = RegNetConfig
lowerCamelCase__: List[Any] = "regnet"
lowerCamelCase__: Dict = "pixel_values"
@property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
_snake_case = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_snake_case = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _lowercase , )
class _snake_case ( _lowercase ):
def __init__( self: List[str] , __lowerCamelCase: RegNetConfig , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Tuple ) -> Tuple:
super().__init__(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : str = TFRegNetMainLayer(__lowerCamelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: tf.Tensor , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: Dict=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Optional[int] = self.regnet(
pixel_values=__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase , training=__lowerCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _lowercase , )
class _snake_case ( _lowercase , _lowercase ):
def __init__( self: Tuple , __lowerCamelCase: RegNetConfig , *__lowerCamelCase: str , **__lowerCamelCase: List[str] ) -> List[str]:
super().__init__(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = config.num_labels
__UpperCAmelCase : Union[str, Any] = TFRegNetMainLayer(__lowerCamelCase , name="regnet" )
# classification head
__UpperCAmelCase : Optional[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: tf.Tensor = None , __lowerCamelCase: tf.Tensor = None , __lowerCamelCase: bool = None , __lowerCamelCase: bool = None , __lowerCamelCase: Union[str, Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase : Optional[int] = self.regnet(
__lowerCamelCase , output_hidden_states=__lowerCamelCase , return_dict=__lowerCamelCase , training=__lowerCamelCase )
__UpperCAmelCase : str = outputs.pooler_output if return_dict else outputs[1]
__UpperCAmelCase : Optional[int] = self.classifier[0](__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = self.classifier[1](__lowerCamelCase )
__UpperCAmelCase : Optional[int] = None if labels is None else self.hf_compute_loss(labels=__lowerCamelCase , logits=__lowerCamelCase )
if not return_dict:
__UpperCAmelCase : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states )
| 342 |
import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
The Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: List[str] = IFPipeline
lowerCamelCase__: Union[str, Any] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
lowerCamelCase__: Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__: Tuple = PipelineTesterMixin.required_optional_params - {"latents"}
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
return self._get_dummy_components()
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Dict , __lowerCamelCase: int=0 ) -> Dict:
if str(__lowerCamelCase ).startswith("mps" ):
__UpperCAmelCase : int = torch.manual_seed(__lowerCamelCase )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
__UpperCAmelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _lowerCamelCase ( self: int ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _lowerCamelCase ( self: Tuple ) -> List[str]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowerCamelCase ( self: str ) -> Dict:
self._test_save_load_local()
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCamelCase ( self: Optional[int] ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self: str ) -> Optional[int]:
# text-to-image
__UpperCAmelCase : int = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
__UpperCAmelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
__UpperCAmelCase , __UpperCAmelCase : Dict = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__UpperCAmelCase : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
__UpperCAmelCase : Any = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__UpperCAmelCase : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
__UpperCAmelCase : Optional[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] ) -> int:
# pipeline 1
_start_torch_memory_measurement()
__UpperCAmelCase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase : Optional[int] = pipe_a(
prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , num_inference_steps=2 , generator=__lowerCamelCase , output_type="np" , )
__UpperCAmelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
__UpperCAmelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__UpperCAmelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
# pipeline 2
_start_torch_memory_measurement()
__UpperCAmelCase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = pipe_a(
prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , )
__UpperCAmelCase : int = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__UpperCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__UpperCAmelCase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: Any , __lowerCamelCase: List[str] , __lowerCamelCase: Dict ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
__UpperCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCamelCase )
__UpperCAmelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = pipe_a(
prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , image=__lowerCamelCase , num_inference_steps=2 , generator=__lowerCamelCase , output_type="np" , )
__UpperCAmelCase : str = output.images[0]
assert image.shape == (64, 64, 3)
__UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__UpperCAmelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
# pipeline 2
_start_torch_memory_measurement()
__UpperCAmelCase : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__lowerCamelCase )
__UpperCAmelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCamelCase )
__UpperCAmelCase : Dict = pipe_a(
prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , image=__lowerCamelCase , original_image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , )
__UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__UpperCAmelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: Tuple , __lowerCamelCase: str ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
__UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCamelCase )
__UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__lowerCamelCase )
__UpperCAmelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase : List[Any] = pipe_a(
prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , image=__lowerCamelCase , mask_image=__lowerCamelCase , num_inference_steps=2 , generator=__lowerCamelCase , output_type="np" , )
__UpperCAmelCase : int = output.images[0]
assert image.shape == (64, 64, 3)
__UpperCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__UpperCAmelCase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
# pipeline 2
_start_torch_memory_measurement()
__UpperCAmelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__lowerCamelCase )
__UpperCAmelCase : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = pipe_a(
prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , image=__lowerCamelCase , mask_image=__lowerCamelCase , original_image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , )
__UpperCAmelCase : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
__UpperCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__UpperCAmelCase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
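# reset CUDA memory statistics so each pipeline stage's peak allocation can be measured in isolation by the tests above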
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 342 | import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
def __init__( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict=13 , __lowerCamelCase: List[str]=7 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: int=True , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Tuple=99 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=5 , __lowerCamelCase: List[str]=4 , __lowerCamelCase: str=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: int=0.1 , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: Tuple=5_12 , __lowerCamelCase: int=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Optional[Any]=0.02 , __lowerCamelCase: Optional[Any]=4 , ) -> str:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Optional[int] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[str] = use_attention_mask
__UpperCAmelCase : Dict = use_token_type_ids
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = type_vocab_size
__UpperCAmelCase : str = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = num_choices
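    # build random input ids, an optional attention mask, and a small DistilBert config for the Flax tests below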
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : str = None
if self.use_attention_mask:
__UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__lowerCamelCase , )
return config, input_ids, attention_mask
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self: List[Any] ) -> Dict:
__UpperCAmelCase : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class_name.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self: int ) -> List[Any]:
__UpperCAmelCase : Dict = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
__UpperCAmelCase : Any = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = (1, 11, 7_68)
self.assertEqual(output.shape , __lowerCamelCase )
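        # spot-check a 3x3 slice of the final hidden states against precomputed reference values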
__UpperCAmelCase : Optional[int] = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 1 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge(self, u, v, w):
        self.dp[u][v] = w
    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 342 | import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_snake_case = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_snake_case = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_snake_case = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_snake_case = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Any:
for tf_name, hf_name in patterns:
__UpperCAmelCase : Optional[int] = k.replace(snake_case__, snake_case__ )
return k
def _UpperCamelCase ( snake_case__, snake_case__ ) -> BigBirdPegasusForConditionalGeneration:
__UpperCAmelCase : Dict = BigBirdPegasusConfig(**snake_case__ )
__UpperCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(snake_case__ )
__UpperCAmelCase : Optional[Any] = torch_model.state_dict()
__UpperCAmelCase : Optional[int] = {}
# separating decoder weights
__UpperCAmelCase : List[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
__UpperCAmelCase : str = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : Optional[int] = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : List[str] = DECODER_PATTERNS
__UpperCAmelCase : str = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"]):
__UpperCAmelCase : Optional[int] = v.T
__UpperCAmelCase : str = torch.from_numpy(snake_case__ )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion" ):
__UpperCAmelCase : int = [k.endswith(snake_case__ ) for ending in KEYS_TO_IGNORE]
if any(snake_case__ ):
continue
__UpperCAmelCase : Optional[Any] = REMAINING_PATTERNS
__UpperCAmelCase : Optional[int] = rename_state_dict_key(snake_case__, snake_case__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ["dense", "query", "key", "value"]):
__UpperCAmelCase : List[Any] = v.T
__UpperCAmelCase : List[str] = torch.from_numpy(snake_case__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
__UpperCAmelCase : List[Any] = mapping["model.embed_positions.weight"]
__UpperCAmelCase : Optional[Any] = mapping.pop("model.embed_positions.weight" )
__UpperCAmelCase , __UpperCAmelCase : Any = torch_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : str = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
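# read every variable from the TensorFlow checkpoint into a dict of numpy arrays, skipping the global step counter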
def _UpperCamelCase ( snake_case__ ) -> Dict:
__UpperCAmelCase : Tuple = tf.train.list_variables(snake_case__ )
__UpperCAmelCase : List[str] = {}
__UpperCAmelCase : str = ["global_step"]
for name, shape in tqdm(snake_case__, desc="converting tf checkpoint to dict" ):
__UpperCAmelCase : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : Optional[Any] = tf.train.load_variable(snake_case__, snake_case__ )
__UpperCAmelCase : Tuple = array
return tf_weights
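# end-to-end conversion driver: load the TF weights, remap them into a BigBirdPegasus model, and save it with save_pretrained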
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Dict:
__UpperCAmelCase : str = get_tf_weights_as_numpy(snake_case__ )
__UpperCAmelCase : List[Any] = convert_bigbird_pegasus(snake_case__, snake_case__ )
torch_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
_snake_case = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 342 | 1 |
import itertools
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
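# infinite generator yielding the primes 2, 3, 5, ... in increasing order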
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'{solution() = }')
| 342 | import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( _lowercase ):
lowerCamelCase__: Any = ["image_processor", "tokenizer"]
lowerCamelCase__: Optional[Any] = "BlipImageProcessor"
lowerCamelCase__: Optional[int] = "AutoTokenizer"
def __init__( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ) -> Dict:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
__UpperCAmelCase : Dict = qformer_tokenizer
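    # run both tokenizers (language-model tokenizer + Q-Former tokenizer) on the text and the image processor on the images, merging everything into one BatchFeature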
def __call__( self: Any , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Dict , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
__UpperCAmelCase : str = BatchFeature()
if text is not None:
__UpperCAmelCase : Any = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
__UpperCAmelCase : Dict = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = qformer_text_encoding.pop("input_ids" )
__UpperCAmelCase : Optional[int] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def _lowerCamelCase ( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: Any ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Any , **__lowerCamelCase: Dict ) -> Tuple:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self: List[str] ) -> Tuple:
__UpperCAmelCase : str = self.tokenizer.model_input_names
__UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[Any] ) -> str:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
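    # when reloading, the Q-Former tokenizer is restored from its dedicated subfolder and appended to the usual constructor arguments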
@classmethod
def _lowerCamelCase ( cls: Tuple , __lowerCamelCase: Tuple , **__lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
__UpperCAmelCase : List[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
| 342 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( _lowercase ):
lowerCamelCase__: UNetaDModel
lowerCamelCase__: ScoreSdeVeScheduler
def __init__( self: List[Any] , __lowerCamelCase: UNetaDModel , __lowerCamelCase: ScoreSdeVeScheduler ) -> Optional[int]:
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
@torch.no_grad()
def __call__( self: Union[str, Any] , __lowerCamelCase: int = 1 , __lowerCamelCase: int = 20_00 , __lowerCamelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase: Optional[str] = "pil" , __lowerCamelCase: bool = True , **__lowerCamelCase: Union[str, Any] , ) -> Union[ImagePipelineOutput, Tuple]:
__UpperCAmelCase : List[Any] = self.unet.config.sample_size
__UpperCAmelCase : Dict = (batch_size, 3, img_size, img_size)
__UpperCAmelCase : Optional[int] = self.unet
__UpperCAmelCase : Optional[Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase ) * self.scheduler.init_noise_sigma
__UpperCAmelCase : List[Any] = sample.to(self.device )
self.scheduler.set_timesteps(__lowerCamelCase )
self.scheduler.set_sigmas(__lowerCamelCase )
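        # predictor-corrector sampling: run correct_steps correction passes per noise level, followed by one prediction step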
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCAmelCase : Dict = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCAmelCase : Dict = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
__UpperCAmelCase : Optional[int] = self.scheduler.step_correct(__lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
# prediction step
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , __lowerCamelCase ).sample
__UpperCAmelCase : Tuple = self.scheduler.step_pred(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = output.prev_sample, output.prev_sample_mean
__UpperCAmelCase : Optional[int] = sample_mean.clamp(0 , 1 )
__UpperCAmelCase : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase : Dict = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowerCamelCase )
| 342 | import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_snake_case = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_snake_case = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _UpperCamelCase ( ) -> Dict:
__UpperCAmelCase : Tuple = (
list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : str = bs[:]
__UpperCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Optional[Any] = [chr(snake_case__ ) for n in cs]
return dict(zip(snake_case__, snake_case__ ) )
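# return the set of adjacent symbol pairs in a word, used to pick the next BPE merge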
def _UpperCamelCase ( snake_case__ ) -> Any:
__UpperCAmelCase : List[Any] = set()
__UpperCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Union[str, Any] = char
return pairs
class _snake_case ( _lowercase ):
lowerCamelCase__: str = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
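    # greedy BPE: repeatedly merge the lowest-ranked adjacent pair until no known merge remains; results are cached per token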
def _lowerCamelCase ( self: int , __lowerCamelCase: List[Any] ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
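    # flatten a Conversation into one space-joined string, encode it, and keep only the last model_max_length tokens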
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 342 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
_snake_case = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
_snake_case = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
_snake_case = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
_snake_case = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
_snake_case = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase__: List[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: List[str] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__: List[str] = DPRContextEncoderTokenizer
class _snake_case ( _lowercase ):
lowerCamelCase__: Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase__: Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__: Any = DPRQuestionEncoderTokenizer
_snake_case = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
_snake_case = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
_snake_case = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
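# A minimal usage sketch (illustrative names only; `reader_outputs` would come from a DPRReader forward pass):
#   encoded = tokenizer(questions="Who wrote Hamlet?", titles="Hamlet", texts="Hamlet is a tragedy by William Shakespeare.", return_tensors="pt")
#   best_spans = tokenizer.decode_best_spans(encoded, reader_outputs)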
@add_start_docstrings(_lowercase )
class _snake_case :
def __call__( self: str , __lowerCamelCase: int , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Union[bool, str] = False , __lowerCamelCase: Union[bool, str] = False , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[Union[str, TensorType]] = None , __lowerCamelCase: Optional[bool] = None , **__lowerCamelCase: Optional[int] , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
elif titles is None or texts is None:
__UpperCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Dict = titles if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [titles]
__UpperCAmelCase : Dict = texts if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [texts]
__UpperCAmelCase : str = len(__lowerCamelCase )
__UpperCAmelCase : List[str] = questions if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [questions] * n_passages
assert len(__lowerCamelCase ) == len(
            __lowerCamelCase ), f'''There should be as many titles as texts, but got {len(__lowerCamelCase )} titles and {len(__lowerCamelCase )} texts.'''
__UpperCAmelCase : int = super().__call__(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
__UpperCAmelCase : Tuple = super().__call__(__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
__UpperCAmelCase : List[Any] = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__lowerCamelCase , __lowerCamelCase )
]
}
if return_attention_mask is not False:
__UpperCAmelCase : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__UpperCAmelCase : Tuple = attention_mask
return self.pad(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase )
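    # rank passages by their relevance logit, then extract the highest-scoring non-overlapping answer spans from each one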
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: BatchEncoding , __lowerCamelCase: DPRReaderOutput , __lowerCamelCase: int = 16 , __lowerCamelCase: int = 64 , __lowerCamelCase: int = 4 , ) -> List[DPRSpanPrediction]:
__UpperCAmelCase : Union[str, Any] = reader_input["input_ids"]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = reader_output[:3]
__UpperCAmelCase : int = len(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = sorted(range(__lowerCamelCase ) , reverse=__lowerCamelCase , key=relevance_logits.__getitem__ )
__UpperCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__UpperCAmelCase : Dict = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__UpperCAmelCase : List[Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__UpperCAmelCase : int = sequence_ids.index(self.pad_token_id )
else:
__UpperCAmelCase : Union[str, Any] = len(__lowerCamelCase )
__UpperCAmelCase : List[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCamelCase , top_spans=__lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCamelCase , start_index=__lowerCamelCase , end_index=__lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
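    # score each candidate span as start_logit + end_logit, sort descending, and skip spans that overlap an already selected one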
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: List[int] , __lowerCamelCase: List[int] , __lowerCamelCase: int , __lowerCamelCase: int , ) -> List[DPRSpanPrediction]:
__UpperCAmelCase : Tuple = []
for start_index, start_score in enumerate(__lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__UpperCAmelCase : str = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[1] , reverse=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
__UpperCAmelCase : Any = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowercase )
class _snake_case ( _lowercase , _lowercase ):
lowerCamelCase__: Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase__: int = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__: Union[str, Any] = ["input_ids", "attention_mask"]
lowerCamelCase__: str = DPRReaderTokenizer
| 342 | import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: List[Any] = CanineTokenizer
lowerCamelCase__: Optional[int] = False
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
super().setUp()
__UpperCAmelCase : Tuple = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCamelCase ( self: Union[str, Any] ) -> List[Any]:
return CanineTokenizer.from_pretrained("google/canine-s" )
def _lowerCamelCase ( self: Any , **__lowerCamelCase: List[Any] ) -> CanineTokenizer:
__UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 10_24
return tokenizer
@require_torch
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Union[str, Any] = self.canine_tokenizer
__UpperCAmelCase : List[str] = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
__UpperCAmelCase : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : Dict = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
__UpperCAmelCase : Union[str, Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertIn("token_type_ids" , __lowerCamelCase )
@require_torch
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.canine_tokenizer
__UpperCAmelCase : int = [
"What's the weater?",
"It's about 25 degrees.",
]
__UpperCAmelCase : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : List[Any] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Dict = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__UpperCAmelCase : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCAmelCase : Tuple = chr(0xE_0_0_7 )
additional_special_tokens.append(__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__UpperCAmelCase : Optional[int] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__UpperCAmelCase : str = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn(__lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Optional[int]:
__UpperCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.get_clean_sequence(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : int = 0xE_0_0_5
__UpperCAmelCase : Tuple = chr(__lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__UpperCAmelCase : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Dict = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : int = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , input_encoded + special_token_id )
__UpperCAmelCase : Optional[int] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self: Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : Optional[int] = chr(0xE_0_0_5 )
__UpperCAmelCase : List[str] = chr(0xE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
__UpperCAmelCase : Tuple = tokenizer.tokenize(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCamelCase )
self.assertEqual(token_a[0] , __lowerCamelCase )
@require_tokenizers
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Any = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Union[str, Any] = 0xE_0_0_6
__UpperCAmelCase : int = chr(__lowerCamelCase )
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCamelCase )
tokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Tuple = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__lowerCamelCase )
# a special token for Canine can be defined as follows:
__UpperCAmelCase : Any = 0xE_0_0_6
__UpperCAmelCase : Union[str, Any] = chr(__lowerCamelCase )
__UpperCAmelCase : Dict = [new_token_a]
__UpperCAmelCase : int = [new_token_a]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCAmelCase : List[Any] = 0xE_0_0_7
__UpperCAmelCase : List[Any] = chr(__lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : str = [AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase )]
__UpperCAmelCase : Dict = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , extra_ids=0 )
self.assertIn(__lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : int = "hello world"
if self.space_between_special_tokens:
__UpperCAmelCase : Any = "[CLS] hello world [SEP]"
else:
__UpperCAmelCase : Union[str, Any] = input
__UpperCAmelCase : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__UpperCAmelCase : Any = tokenizer.decode(__lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCamelCase , [output, output.lower()] )
def _lowerCamelCase ( self: Dict ) -> Any:
__UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase : List[str] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
__UpperCAmelCase : List[str] = "a"
__UpperCAmelCase : Any = ord(__lowerCamelCase )
for attr in attributes_list:
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , attr + "_id" , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(getattr(__lowerCamelCase , attr + "_id" ) , __lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [] )
__UpperCAmelCase : Tuple = 0xE_0_0_6
__UpperCAmelCase : Optional[Any] = chr(__lowerCamelCase )
setattr(__lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
pass
def _lowerCamelCase ( self: Any ) -> Any:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: Optional[int] ) -> Any:
pass
def _lowerCamelCase ( self: List[str] ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
pass
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
pass
def _lowerCamelCase ( self: str ) -> Tuple:
pass
| 342 | 1 |
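# A minimal usage sketch of the character-level special-token pattern exercised in the
# CANINE tokenizer tests above, assuming the public "google/canine-s" checkpoint is
# reachable; the private-use code point 0xE005 is an arbitrary illustrative choice.
from transformers import CanineTokenizer

canine_tok = CanineTokenizer.from_pretrained("google/canine-s")
cls_char = chr(0xE005)                      # CANINE token ids are plain Unicode code points
canine_tok.add_special_tokens({"cls_token": cls_char})
char_ids = canine_tok.encode("hello", add_special_tokens=False)
print(char_ids)                             # each id equals ord(character)
print(canine_tok.decode(char_ids, skip_special_tokens=True))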
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: List[Any] ) -> Optional[int]:
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
__UpperCAmelCase : Any = kwargs.pop("main_process_only" , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("in_order" , __lowerCamelCase )
if self.isEnabledFor(__lowerCamelCase ):
if self._should_log(__lowerCamelCase ):
__UpperCAmelCase , __UpperCAmelCase : int = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
elif in_order:
__UpperCAmelCase : Optional[int] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.process(__lowerCamelCase , __lowerCamelCase )
self.logger.log(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
state.wait_for_everyone()
def _UpperCamelCase ( snake_case__, snake_case__ = None ) -> List[str]:
if log_level is None:
__UpperCAmelCase : List[Any] = os.environ.get("ACCELERATE_LOG_LEVEL", snake_case__ )
__UpperCAmelCase : Union[str, Any] = logging.getLogger(snake_case__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case__, {} )
| 342 | 1 |
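# Hedged usage sketch for the multi-process logging adapter defined above, assuming the
# standard `accelerate` package layout (accelerate.logging.get_logger); the kwarg names
# follow the adapter's `main_process_only` / `in_order` handling shown in the snippet.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()                  # initializes PartialState so the adapter can log
logger = get_logger(__name__, log_level="INFO")
logger.info("logged once, on the main process only")
logger.info("logged by every process", main_process_only=False)
logger.info("logged by each rank in turn", main_process_only=False, in_order=True)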
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: str=13 , __lowerCamelCase: int=32 , __lowerCamelCase: Optional[int]=3 , __lowerCamelCase: List[Any]=4 , __lowerCamelCase: int=[10, 20, 30, 40] , __lowerCamelCase: Union[str, Any]=[2, 2, 3, 2] , __lowerCamelCase: str=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: List[str]=37 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: List[str]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Any=3 , __lowerCamelCase: Any=None , ) -> Optional[int]:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : str = image_size
__UpperCAmelCase : str = num_channels
__UpperCAmelCase : Dict = num_stages
__UpperCAmelCase : Any = hidden_sizes
__UpperCAmelCase : List[Any] = depths
__UpperCAmelCase : List[str] = is_training
__UpperCAmelCase : Tuple = use_labels
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = type_sequence_label_size
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : Dict = out_features
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : Union[str, Any] = scope
__UpperCAmelCase : Union[str, Any] = num_stages
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : str = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Optional[int] ) -> List[Any]:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _lowerCamelCase ( self: Dict ) -> Optional[int]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__lowerCamelCase , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Any ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = UperNetForSemanticSegmentation(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
__UpperCAmelCase : Dict = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: List[str] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCamelCase__: Any = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCamelCase__: str = False
lowerCamelCase__: List[str] = False
lowerCamelCase__: List[Any] = False
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: Optional[int] = False
lowerCamelCase__: Dict = False
def _lowerCamelCase ( self: str ) -> str:
__UpperCAmelCase : Optional[Any] = UperNetModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Any ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: Tuple ) -> Dict:
return
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
__UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> Dict:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
@unittest.skip(reason="UperNet does not have a base model" )
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[Any]:
pass
@unittest.skip(reason="UperNet does not have a base model" )
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _lowerCamelCase ( self: Dict ) -> Any:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self: int ) -> str:
pass
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase: Dict , __lowerCamelCase: Dict , __lowerCamelCase: Optional[Any] ):
__UpperCAmelCase : Optional[int] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Any = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> str:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = _config_zero_init(__lowerCamelCase )
__UpperCAmelCase : str = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="UperNet does not have tied weights" )
def _lowerCamelCase ( self: Tuple ) -> int:
pass
@slow
def _lowerCamelCase ( self: int ) -> Tuple:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Dict = UperNetForSemanticSegmentation.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> int:
__UpperCAmelCase : Tuple = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg" )
__UpperCAmelCase : Union[str, Any] = Image.open(snake_case__ ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: str ) -> str:
__UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
__UpperCAmelCase : Optional[int] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**__lowerCamelCase )
__UpperCAmelCase : Optional[int] = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : int = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
def _lowerCamelCase ( self: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
__UpperCAmelCase : int = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(__lowerCamelCase )
__UpperCAmelCase : Any = prepare_img()
__UpperCAmelCase : int = processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**__lowerCamelCase )
__UpperCAmelCase : Any = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
| 342 | from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , __lowerCamelCase: NestedDataStructureLike[PathLike] , __lowerCamelCase: Optional[NamedSplit] = None , __lowerCamelCase: Optional[Features] = None , __lowerCamelCase: str = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[int] = None , **__lowerCamelCase: Tuple , ) -> str:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Union[str, Any] = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
__UpperCAmelCase : int = Text(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Any = None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__UpperCAmelCase : Dict = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
| 342 | 1 |
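# Illustrative use of the Text reader above through the public `datasets` entry point,
# assuming local files named train.txt / test.txt exist (hypothetical paths).
from datasets import load_dataset

text_ds = load_dataset("text", data_files={"train": "train.txt", "test": "test.txt"})
print(text_ds["train"][0]["text"])           # each line of the file becomes one example

# the streaming branch above corresponds to streaming=True, which yields an IterableDataset
streamed = load_dataset("text", data_files={"train": "train.txt"}, streaming=True)
print(next(iter(streamed["train"])))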
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_snake_case = '''hf-internal-testing/tiny-random-bert'''
_snake_case = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
_snake_case = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: int ) -> str:
__UpperCAmelCase : Dict = cached_file(__lowerCamelCase , __lowerCamelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__lowerCamelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__lowerCamelCase , __lowerCamelCase ) ) )
with open(os.path.join(__lowerCamelCase , "refs" , "main" ) ) as f:
__UpperCAmelCase : Optional[int] = f.read()
self.assertEqual(__lowerCamelCase , os.path.join(__lowerCamelCase , "snapshots" , __lowerCamelCase , __lowerCamelCase ) )
self.assertTrue(os.path.isfile(__lowerCamelCase ) )
# File is cached at the same place the second time.
__UpperCAmelCase : Tuple = cached_file(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
# Using a specific revision to test the full commit hash.
__UpperCAmelCase : Optional[Any] = cached_file(__lowerCamelCase , __lowerCamelCase , revision="9b8c223" )
self.assertEqual(__lowerCamelCase , os.path.join(__lowerCamelCase , "snapshots" , __lowerCamelCase , __lowerCamelCase ) )
def _lowerCamelCase ( self: Union[str, Any] ) -> Any:
with self.assertRaisesRegex(__lowerCamelCase , "is not a valid model identifier" ):
__UpperCAmelCase : Optional[Any] = cached_file("tiny-random-bert" , __lowerCamelCase )
with self.assertRaisesRegex(__lowerCamelCase , "is not a valid git identifier" ):
__UpperCAmelCase : str = cached_file(__lowerCamelCase , __lowerCamelCase , revision="aaaa" )
with self.assertRaisesRegex(__lowerCamelCase , "does not appear to have a file named" ):
__UpperCAmelCase : List[Any] = cached_file(__lowerCamelCase , "conf" )
def _lowerCamelCase ( self: str ) -> Optional[int]:
with self.assertRaisesRegex(__lowerCamelCase , "does not appear to have a file named" ):
__UpperCAmelCase : int = cached_file(__lowerCamelCase , "conf" )
with open(os.path.join(__lowerCamelCase , "refs" , "main" ) ) as f:
__UpperCAmelCase : Optional[Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , ".no_exist" , __lowerCamelCase , "conf" ) ) )
__UpperCAmelCase : List[str] = cached_file(__lowerCamelCase , "conf" , _raise_exceptions_for_missing_entries=__lowerCamelCase )
self.assertIsNone(__lowerCamelCase )
__UpperCAmelCase : Tuple = cached_file(__lowerCamelCase , "conf" , local_files_only=__lowerCamelCase , _raise_exceptions_for_missing_entries=__lowerCamelCase )
self.assertIsNone(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = mock.Mock()
__UpperCAmelCase : Union[str, Any] = 5_00
__UpperCAmelCase : Dict = {}
__UpperCAmelCase : str = HTTPError
__UpperCAmelCase : Tuple = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=__lowerCamelCase ) as mock_head:
__UpperCAmelCase : Dict = cached_file(__lowerCamelCase , "conf" , _raise_exceptions_for_connection_errors=__lowerCamelCase )
self.assertIsNone(__lowerCamelCase )
            # This checks that we actually called the fake head request
mock_head.assert_called()
def _lowerCamelCase ( self: Dict ) -> Optional[Any]:
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , __lowerCamelCase ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , __lowerCamelCase ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , __lowerCamelCase ) )
def _lowerCamelCase ( self: Tuple ) -> Dict:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__lowerCamelCase , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , __lowerCamelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__lowerCamelCase , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , __lowerCamelCase , revision="ahaha" )
__UpperCAmelCase : Optional[int] = get_file_from_repo("bert-base-cased" , __lowerCamelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
__UpperCAmelCase : Tuple = json.loads(open(__lowerCamelCase , "r" ).read() )
self.assertEqual(config["hidden_size"] , 7_68 )
def _lowerCamelCase ( self: Optional[int] ) -> str:
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : List[str] = Path(__lowerCamelCase ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(__lowerCamelCase , "a.txt" ) , str(__lowerCamelCase ) )
self.assertIsNone(get_file_from_repo(__lowerCamelCase , "b.txt" ) )
| 342 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 1 |
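# A small sketch of the hub caching helpers exercised by the tests above, assuming network
# access to the Hugging Face Hub; the repo id is the same tiny fixture used in the tests.
from transformers.utils import cached_file, has_file

config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(config_path)                           # resolved path inside the local cache
print(has_file("hf-internal-testing/tiny-random-bert", "pytorch_model.bin"))

# missing entries can return None instead of raising, as the tests check
maybe_missing = cached_file(
    "hf-internal-testing/tiny-random-bert",
    "does_not_exist.txt",
    _raise_exceptions_for_missing_entries=False,
)
print(maybe_missing)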
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Tuple=13 , __lowerCamelCase: List[str]=32 , __lowerCamelCase: Optional[Any]=3 , __lowerCamelCase: Union[str, Any]=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: Union[str, Any]=[2, 2, 3, 2] , __lowerCamelCase: Dict=True , __lowerCamelCase: Dict=True , __lowerCamelCase: int=37 , __lowerCamelCase: str="gelu" , __lowerCamelCase: Optional[int]=10 , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: Any=["stage2", "stage3", "stage4"] , __lowerCamelCase: int=[2, 3, 4] , __lowerCamelCase: List[str]=None , ) -> List[Any]:
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Any = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : str = num_stages
__UpperCAmelCase : Optional[Any] = hidden_sizes
__UpperCAmelCase : Optional[Any] = depths
__UpperCAmelCase : str = is_training
__UpperCAmelCase : Any = use_labels
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Tuple = num_labels
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : str = out_features
__UpperCAmelCase : int = out_indices
__UpperCAmelCase : Optional[int] = scope
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Union[str, Any] ) -> str:
__UpperCAmelCase : Union[str, Any] = ConvNextModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: str ) -> int:
__UpperCAmelCase : Optional[int] = ConvNextForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Any = ConvNextBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : str = None
__UpperCAmelCase : List[str] = ConvNextBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> Tuple:
__UpperCAmelCase : Dict = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = config_and_inputs
__UpperCAmelCase : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[int] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: Dict = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Union[str, Any] = True
lowerCamelCase__: str = False
lowerCamelCase__: Optional[Any] = False
lowerCamelCase__: Dict = False
lowerCamelCase__: Optional[Any] = False
def _lowerCamelCase ( self: Dict ) -> Tuple:
__UpperCAmelCase : Dict = ConvNextModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: Any ) -> Union[str, Any]:
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def _lowerCamelCase ( self: Tuple ) -> List[str]:
pass
def _lowerCamelCase ( self: str ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = model_class(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> int:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Tuple:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Union[str, Any]:
def check_hidden_states_output(__lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] ):
__UpperCAmelCase : List[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : List[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : List[str] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: List[Any] ) -> str:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: List[str] ) -> str:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = ConvNextModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[str]:
__UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : List[str] = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(__lowerCamelCase )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Union[str, Any] = prepare_img()
__UpperCAmelCase : Dict = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : Dict = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class _snake_case ( unittest.TestCase , _lowercase ):
lowerCamelCase__: Optional[int] = (ConvNextBackbone,) if is_torch_available() else ()
lowerCamelCase__: Optional[Any] = ConvNextConfig
lowerCamelCase__: Optional[Any] = False
def _lowerCamelCase ( self: Tuple ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = ConvNextModelTester(self )
| 342 | import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Dict = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: str = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: Tuple = False
lowerCamelCase__: int = False
lowerCamelCase__: Dict = False
lowerCamelCase__: int = False
lowerCamelCase__: Any = False
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = ConvNextVaModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
if model_class.__name__ in [
*get_values(__lowerCamelCase ),
*get_values(__lowerCamelCase ),
]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
| 342 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
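# The helpers below build a ConvNeXt + UperNet config for a given checkpoint
# name, rename the original mmsegmentation state-dict keys to the HuggingFace
# layout, and verify the converted model on a sample ADE20k image before saving.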
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 | import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _snake_case ( _lowercase ):
lowerCamelCase__: str = "detr"
lowerCamelCase__: Dict = ["past_key_values"]
lowerCamelCase__: str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self: List[str] , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=3 , __lowerCamelCase: str=1_00 , __lowerCamelCase: Union[str, Any]=6 , __lowerCamelCase: Union[str, Any]=20_48 , __lowerCamelCase: Dict=8 , __lowerCamelCase: Optional[int]=6 , __lowerCamelCase: List[Any]=20_48 , __lowerCamelCase: int=8 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Any=True , __lowerCamelCase: Tuple="relu" , __lowerCamelCase: Tuple=2_56 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Optional[int]=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1.0 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Dict="sine" , __lowerCamelCase: Optional[int]="resnet50" , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: int=False , __lowerCamelCase: Union[str, Any]=1 , __lowerCamelCase: Tuple=5 , __lowerCamelCase: int=2 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Dict=1 , __lowerCamelCase: Union[str, Any]=5 , __lowerCamelCase: Dict=2 , __lowerCamelCase: int=0.1 , **__lowerCamelCase: str , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[Any] = backbone_config.get("model_type" )
__UpperCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : List[str] = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = None, None, None
__UpperCAmelCase : Any = use_timm_backbone
__UpperCAmelCase : Optional[Any] = backbone_config
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : List[Any] = num_queries
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Optional[Any] = encoder_ffn_dim
__UpperCAmelCase : Dict = encoder_layers
__UpperCAmelCase : List[Any] = encoder_attention_heads
__UpperCAmelCase : int = decoder_ffn_dim
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : int = decoder_attention_heads
__UpperCAmelCase : List[Any] = dropout
__UpperCAmelCase : Dict = attention_dropout
__UpperCAmelCase : Optional[Any] = activation_dropout
__UpperCAmelCase : int = activation_function
__UpperCAmelCase : Any = init_std
__UpperCAmelCase : str = init_xavier_std
__UpperCAmelCase : int = encoder_layerdrop
__UpperCAmelCase : Tuple = decoder_layerdrop
__UpperCAmelCase : List[Any] = encoder_layers
__UpperCAmelCase : Optional[Any] = auxiliary_loss
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = backbone
__UpperCAmelCase : str = use_pretrained_backbone
__UpperCAmelCase : Dict = dilation
# Hungarian matcher
__UpperCAmelCase : Optional[int] = class_cost
__UpperCAmelCase : Optional[Any] = bbox_cost
__UpperCAmelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCAmelCase : Any = mask_loss_coefficient
__UpperCAmelCase : Any = dice_loss_coefficient
__UpperCAmelCase : Any = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Optional[Any] = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Dict ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self: str ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: List[Any] ) -> List[Any]:
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Dict[str, any]:
__UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCAmelCase : int = self.backbone_config.to_dict()
__UpperCAmelCase : List[str] = self.__class__.model_type
return output
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = version.parse("1.11" )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self: Optional[Any] ) -> float:
return 1e-5
@property
def _lowerCamelCase ( self: List[str] ) -> int:
return 12
| 342 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = ["image_processor", "tokenizer"]
lowerCamelCase__: int = "OwlViTImageProcessor"
lowerCamelCase__: str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self: Tuple , __lowerCamelCase: Any=None , __lowerCamelCase: int=None , **__lowerCamelCase: Optional[int] ) -> str:
__UpperCAmelCase : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
__UpperCAmelCase : Any = kwargs.pop("feature_extractor" )
__UpperCAmelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self: Dict , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Tuple=None , __lowerCamelCase: Dict=None , __lowerCamelCase: Any="max_length" , __lowerCamelCase: List[Any]="np" , **__lowerCamelCase: Tuple ) -> Dict:
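        # Tokenizes the text queries (padding every batch sample to the largest
        # number of queries), optionally preprocesses query images and/or target
        # images, and returns everything as a single BatchEncoding.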
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase ) or (isinstance(__lowerCamelCase , __lowerCamelCase ) and not isinstance(text[0] , __lowerCamelCase )):
__UpperCAmelCase : int = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )]
elif isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(text[0] , __lowerCamelCase ):
__UpperCAmelCase : str = []
# Maximum number of queries across batch
__UpperCAmelCase : Optional[int] = max([len(__lowerCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase ) != max_num_queries:
__UpperCAmelCase : Union[str, Any] = t + [" "] * (max_num_queries - len(__lowerCamelCase ))
__UpperCAmelCase : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
encodings.append(__lowerCamelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCAmelCase : Union[str, Any] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCAmelCase : Optional[int] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCAmelCase : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCAmelCase : str = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCAmelCase : int = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCAmelCase : Optional[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCAmelCase : int = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCAmelCase : List[str] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCAmelCase : Dict = BatchEncoding()
__UpperCAmelCase : Optional[int] = input_ids
__UpperCAmelCase : int = attention_mask
if query_images is not None:
__UpperCAmelCase : int = BatchEncoding()
__UpperCAmelCase : Optional[int] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ).pixel_values
__UpperCAmelCase : Union[str, Any] = query_pixel_values
if images is not None:
__UpperCAmelCase : str = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is not None and images is not None:
__UpperCAmelCase : List[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCAmelCase : List[str] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase )
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Optional[int] , **__lowerCamelCase: Tuple ) -> Dict:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: int , *__lowerCamelCase: str , **__lowerCamelCase: Optional[int] ) -> List[Any]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str , *__lowerCamelCase: Tuple , **__lowerCamelCase: Optional[Any] ) -> Union[str, Any]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] , *__lowerCamelCase: List[str] , **__lowerCamelCase: str ) -> Any:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: str , *__lowerCamelCase: Optional[Any] , **__lowerCamelCase: Any ) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def _lowerCamelCase ( self: Optional[int] ) -> Any:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self: Union[str, Any] ) -> Any:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
| 342 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
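# Row-wise cosine similarity between two embedding matrices: each matrix is
# L2-normalized along its feature axis (norms clipped to a small epsilon to
# avoid division by zero) before taking the pairwise matrix product.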
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str:
__UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
__UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__, norm_emb_a.T )
class _snake_case ( nn.Module ):
lowerCamelCase__: CLIPConfig
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : int = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict:
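        # Project the pooled CLIP image embedding, score it against the learned
        # "special care" and concept embeddings via cosine similarity, and flag
        # any image whose adjusted concept score is positive.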
__UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1]
__UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : List[Any] = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _snake_case ( _lowercase ):
lowerCamelCase__: int = CLIPConfig
lowerCamelCase__: Tuple = "clip_input"
lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int:
if input_shape is None:
__UpperCAmelCase : Dict = (1, 2_24, 2_24, 3)
__UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]:
__UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 342 | 1 |
_snake_case = [0, 2, 4, 6, 8]
_snake_case = [1, 3, 5, 7, 9]
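# Recursive digit-pair construction of "reversible" numbers: a number and its
# reverse are summed pair-by-pair from the outside in, `remainder` carries the
# running sum, and an assignment is counted only if every digit of the sum is
# odd and neither the first nor the last digit is zero.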
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> int:
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__UpperCAmelCase : Optional[int] = 0
for digit in range(10 ):
__UpperCAmelCase : Optional[int] = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, snake_case__, snake_case__ )
return result
__UpperCAmelCase : Tuple = 0
for digita in range(10 ):
__UpperCAmelCase : Union[str, Any] = digita
if (remainder + digita) % 2 == 0:
__UpperCAmelCase : List[str] = ODD_DIGITS
else:
__UpperCAmelCase : Any = EVEN_DIGITS
for digita in other_parity_digits:
__UpperCAmelCase : List[Any] = digita
result += reversible_numbers(
remaining_length - 2, (remainder + digita + digita) // 10, snake_case__, snake_case__, )
return result
def _UpperCamelCase ( snake_case__ = 9 ) -> int:
__UpperCAmelCase : Union[str, Any] = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(snake_case__, 0, [0] * length, snake_case__ )
return result
if __name__ == "__main__":
print(F'{solution() = }')
| 342 | import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 | 1 |
def _UpperCamelCase ( snake_case__ ) -> int:
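    # Kahn-style topological traversal: vertices with indegree 0 are processed
    # first, and long_dist[x] is relaxed so it ends up holding the number of
    # vertices on the longest path that ends at x; the maximum is printed.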
__UpperCAmelCase : int = [0] * len(snake_case__ )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : str = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
__UpperCAmelCase : List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCAmelCase : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 342 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Dict = "roc_bert"
def __init__( self: int , __lowerCamelCase: Union[str, Any]=3_05_22 , __lowerCamelCase: int=7_68 , __lowerCamelCase: Any=12 , __lowerCamelCase: int=12 , __lowerCamelCase: Union[str, Any]=30_72 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Optional[int]=0.1 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Any=5_12 , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: str=0.02 , __lowerCamelCase: int=1e-12 , __lowerCamelCase: str=True , __lowerCamelCase: int=0 , __lowerCamelCase: List[str]="absolute" , __lowerCamelCase: List[Any]=None , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Dict=7_68 , __lowerCamelCase: Optional[int]=9_10 , __lowerCamelCase: Union[str, Any]=5_12 , __lowerCamelCase: int=2_48_58 , __lowerCamelCase: Optional[int]=True , **__lowerCamelCase: Any , ) -> List[Any]:
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = use_cache
__UpperCAmelCase : Optional[Any] = enable_pronunciation
__UpperCAmelCase : Any = enable_shape
__UpperCAmelCase : Union[str, Any] = pronunciation_embed_dim
__UpperCAmelCase : Optional[Any] = pronunciation_vocab_size
__UpperCAmelCase : Optional[Any] = shape_embed_dim
__UpperCAmelCase : List[Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : int = position_embedding_type
__UpperCAmelCase : Optional[int] = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
| 342 | 1 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_snake_case = logging.getLogger(__name__)
_snake_case = '''Hello world! cécé herlolip'''
_snake_case = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
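# Converts an original BertAbs checkpoint into the BertAbsSummarizer layout and
# runs both models on the same inputs to confirm the copied weights produce
# numerically identical outputs before saving the new state dict.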
def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
__UpperCAmelCase : List[Any] = BertAbsConfig(
temp_dir=".", finetune_bert=snake_case__, large=snake_case__, share_emb=snake_case__, use_bert_emb=snake_case__, encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, )
__UpperCAmelCase : str = torch.load(snake_case__, lambda snake_case__, snake_case__ : storage )
__UpperCAmelCase : str = AbsSummarizer(snake_case__, torch.device("cpu" ), snake_case__ )
original.eval()
__UpperCAmelCase : Optional[Any] = BertAbsSummarizer(snake_case__, torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__UpperCAmelCase : Any = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__UpperCAmelCase : int = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case__ )) )
__UpperCAmelCase : str = torch.tensor(snake_case__ ).unsqueeze(0 )
__UpperCAmelCase : Optional[int] = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case__ )) )
__UpperCAmelCase : Any = torch.tensor(snake_case__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase : str = encoder_input_ids
__UpperCAmelCase : Dict = decoder_input_ids
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Dict = None
__UpperCAmelCase : List[str] = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase : Optional[int] = original(snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ )[0]
__UpperCAmelCase : str = original.generator(snake_case__ )
__UpperCAmelCase : Dict = new_model(
snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ )[0]
__UpperCAmelCase : Any = new_model.generator(snake_case__ )
__UpperCAmelCase : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(snake_case__ ) )
__UpperCAmelCase : List[Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(snake_case__ ) )
__UpperCAmelCase : Union[str, Any] = torch.allclose(snake_case__, snake_case__, atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_snake_case = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 342 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
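# The helpers below build a MobileViT (optionally DeepLabV3) config for a given
# checkpoint name, rename the original state-dict keys to the HuggingFace layout
# (splitting fused qkv projections), and verify the converted model against
# hard-coded reference logits before saving.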
def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__UpperCAmelCase : int = [144, 192, 240]
__UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__UpperCAmelCase : Optional[Any] = [96, 120, 144]
__UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__UpperCAmelCase : str = [64, 80, 96]
__UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320]
__UpperCAmelCase : Tuple = 0.05
__UpperCAmelCase : Dict = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : str = 512
__UpperCAmelCase : Any = 16
__UpperCAmelCase : str = 21
__UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json"
else:
__UpperCAmelCase : Optional[Any] = 1000
__UpperCAmelCase : int = "imagenet-1k-id2label.json"
__UpperCAmelCase : Dict = "huggingface/label-files"
__UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : int = idalabel
__UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple:
for i in range(1, 6 ):
if f'''layer_{i}.''' in name:
__UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
__UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." )
if ".block." in name:
__UpperCAmelCase : Optional[int] = name.replace(".block.", "." )
if "exp_1x1" in name:
__UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" )
if "red_1x1" in name:
__UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
__UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
__UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." )
if ".norm." in name:
__UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." )
if ".conv." in name:
__UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." )
if ".conv_proj." in name:
__UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." )
for i in range(0, 2 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' )
for i in range(2, 6 ):
for j in range(0, 4 ):
if f'''.{i}.{j}.''' in name:
__UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' )
if "expand_1x1" in name:
__UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
__UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
__UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" )
for i in range(2, 5 ):
if f'''.global_rep.{i}.weight''' in name:
__UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" )
if f'''.global_rep.{i}.bias''' in name:
__UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" )
if ".global_rep." in name:
__UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." )
if ".pre_norm_mha.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
__UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
__UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
__UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." )
if ".transformer." in name:
__UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." )
if ".aspp_layer." in name:
__UpperCAmelCase : Any = name.replace(".aspp_layer.", "." )
if ".aspp_pool." in name:
__UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." )
if "seg_head." in name:
__UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
__UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." )
if "classifier.fc." in name:
__UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
__UpperCAmelCase : List[str] = "mobilevit." + name
return name
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]:
if base_model:
__UpperCAmelCase : Optional[int] = ""
else:
__UpperCAmelCase : Tuple = "mobilevit."
for key in orig_state_dict.copy().keys():
__UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ )
if key[:8] == "encoder.":
__UpperCAmelCase : str = key[8:]
if "qkv" in key:
__UpperCAmelCase : Tuple = key.split("." )
__UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1
__UpperCAmelCase : Optional[Any] = int(key_split[3] )
__UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' )
__UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__UpperCAmelCase : Optional[Any] = (
f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
__UpperCAmelCase : Any = val[:dim, :]
__UpperCAmelCase : Any = val[dim : dim * 2, :]
__UpperCAmelCase : List[Any] = val[-dim:, :]
else:
__UpperCAmelCase : List[str] = val[:dim]
__UpperCAmelCase : Optional[Any] = val[dim : dim * 2]
__UpperCAmelCase : List[Any] = val[-dim:]
else:
__UpperCAmelCase : str = val
return orig_state_dict
def _UpperCamelCase ( ) -> Any:
__UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]:
__UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ )
# load original state_dict
__UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval()
else:
__UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval()
__UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
__UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" )
__UpperCAmelCase : Dict = model(**snake_case__ )
__UpperCAmelCase : Tuple = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCAmelCase : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCAmelCase : Any = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__UpperCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
__UpperCAmelCase : List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
__UpperCAmelCase : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__, organization="apple" )
model.push_to_hub(snake_case__, organization="apple" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 342 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | import math
_snake_case = 10
_snake_case = 7
_snake_case = BALLS_PER_COLOUR * NUM_COLOURS
def _UpperCamelCase ( snake_case__ = 20 ) -> str:
__UpperCAmelCase : Optional[Any] = math.comb(NUM_BALLS, snake_case__ )
__UpperCAmelCase : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR, snake_case__ )
__UpperCAmelCase : Dict = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
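# Explanatory note on the snippet above (not part of the original code): this appears to be
# Project Euler 493 ("Under The Rainbow"). An urn holds NUM_COLOURS * BALLS_PER_COLOUR = 70
# balls (10 of each of 7 colours) and `num_picks` balls are drawn without replacement.
# By linearity of expectation, the expected number of distinct colours drawn is
#     NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, num_picks) / C(NUM_BALLS, num_picks))
# i.e. 7 * (1 - C(60, 20) / C(70, 20)) for the default of 20 picks, which is roughly 6.8187.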
| 342 | 1 |
import os
# Precomputes a list of the 100 first triangular numbers
_snake_case = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
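# Explanatory note (not part of the original code): the list above precomputes the first 100
# triangular numbers t_n = n * (n + 1) / 2. The function below (Project Euler 42) reads a
# comma-separated word list, scores each word as the sum of its letter positions
# (ord(letter) - 64 for uppercase A..Z), and counts the words whose score is triangular.
# Worked example: "SKY" -> 19 + 11 + 25 = 55, and 55 = t_10, so "SKY" is a triangle word.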
def _UpperCamelCase ( ) -> Union[str, Any]:
__UpperCAmelCase : Dict = os.path.dirname(os.path.realpath(snake_case__ ) )
__UpperCAmelCase : Dict = os.path.join(snake_case__, "words.txt" )
__UpperCAmelCase : List[Any] = ""
with open(snake_case__ ) as f:
__UpperCAmelCase : str = f.readline()
__UpperCAmelCase : Optional[Any] = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
__UpperCAmelCase : Optional[Any] = [
word
for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(snake_case__ )
if __name__ == "__main__":
print(solution())
| 342 | def _UpperCamelCase ( snake_case__ ) -> int:
__UpperCAmelCase : int = [0] * len(snake_case__ )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : str = [1] * len(snake_case__ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(snake_case__ ) ):
if indegree[i] == 0:
queue.append(snake_case__ )
while queue:
__UpperCAmelCase : List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCAmelCase : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(snake_case__ )
print(max(snake_case__ ) )
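# Explanatory note (not part of the original code): the function above is Kahn's algorithm
# (process vertices of in-degree 0, decrementing in-degrees of their neighbours) used as a
# dynamic program for the longest path in a DAG. Each distance starts at 1 and is relaxed to
# predecessor + 1 in topological order, so the printed value is the number of vertices on the
# longest chain. For the sample graph below that chain is 0 -> 2 -> 5 -> 6 -> 7, so the
# intended output is 5.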
# Adjacency list of Graph
_snake_case = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 342 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _snake_case :
def __init__( self: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any]=13 , __lowerCamelCase: str=7 , __lowerCamelCase: str=True , __lowerCamelCase: List[str]=True , __lowerCamelCase: Any=True , __lowerCamelCase: int=True , __lowerCamelCase: Dict=99 , __lowerCamelCase: Any=32 , __lowerCamelCase: str=2 , __lowerCamelCase: int=4 , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Dict="gelu" , __lowerCamelCase: Optional[Any]=0.1 , __lowerCamelCase: List[str]=0.1 , __lowerCamelCase: Optional[int]=5_12 , __lowerCamelCase: List[Any]=16 , __lowerCamelCase: str=2 , __lowerCamelCase: Dict=0.02 , __lowerCamelCase: Optional[Any]=False , __lowerCamelCase: List[Any]=True , __lowerCamelCase: Union[str, Any]="None" , __lowerCamelCase: Optional[int]=3 , __lowerCamelCase: int=4 , __lowerCamelCase: int=None , ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Union[str, Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : Optional[Any] = use_input_mask
__UpperCAmelCase : Optional[int] = use_token_type_ids
__UpperCAmelCase : Any = use_labels
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : Any = intermediate_size
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Tuple = num_labels
__UpperCAmelCase : Tuple = num_choices
__UpperCAmelCase : Optional[Any] = relative_attention
__UpperCAmelCase : int = position_biased_input
__UpperCAmelCase : List[str] = pos_att_type
__UpperCAmelCase : str = scope
def _lowerCamelCase ( self: int ) -> int:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = None
if self.use_token_type_ids:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : Tuple = None
if self.use_labels:
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Tuple = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Dict , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: Dict ) -> List[str]:
__UpperCAmelCase : Tuple = TFDebertaVaModel(config=__lowerCamelCase )
__UpperCAmelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : int = [input_ids, input_mask]
__UpperCAmelCase : str = model(__lowerCamelCase )
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] ) -> List[str]:
__UpperCAmelCase : int = TFDebertaVaForMaskedLM(config=__lowerCamelCase )
__UpperCAmelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCAmelCase : int = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Dict , __lowerCamelCase: Any , __lowerCamelCase: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: List[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = self.num_labels
__UpperCAmelCase : List[str] = TFDebertaVaForSequenceClassification(config=__lowerCamelCase )
__UpperCAmelCase : int = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCAmelCase : Tuple = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Any , __lowerCamelCase: Any ) -> Dict:
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : List[str] = TFDebertaVaForTokenClassification(config=__lowerCamelCase )
__UpperCAmelCase : str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict , __lowerCamelCase: Tuple , __lowerCamelCase: int , __lowerCamelCase: str ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = TFDebertaVaForQuestionAnswering(config=__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCAmelCase : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = config_and_inputs
__UpperCAmelCase : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[int] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__: int = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__: Any = False
lowerCamelCase__: Dict = False
def _lowerCamelCase ( self: int ) -> Optional[Any]:
__UpperCAmelCase : Optional[Any] = TFDebertaVaModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _lowerCamelCase ( self: List[Any] ) -> Dict:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] ) -> Tuple:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def _lowerCamelCase ( self: Dict ) -> List[str]:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : Dict = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class _snake_case ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _lowerCamelCase ( self: str ) -> Tuple:
pass
@slow
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
__UpperCAmelCase : Tuple = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
__UpperCAmelCase : Optional[Any] = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__UpperCAmelCase : str = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : int = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__UpperCAmelCase : str = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 )
| 342 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> List[str]:
__UpperCAmelCase : Dict = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
__UpperCAmelCase : Dict = self.diffusers_dir
shutil.copy(
os.path.join(__lowerCamelCase , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : List[str] = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: str , __lowerCamelCase: Dict , __lowerCamelCase: str=None ) -> str:
__UpperCAmelCase : Tuple = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
__UpperCAmelCase : Any = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
__UpperCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
__UpperCAmelCase : Any = black.format_str(__lowerCamelCase , mode=__lowerCamelCase )
__UpperCAmelCase : Tuple = os.path.join(self.diffusers_dir , "new_code.py" )
with open(__lowerCamelCase , "w" , newline="\n" ) as f:
f.write(__lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__lowerCamelCase )
with open(__lowerCamelCase , "r" ) as f:
self.assertTrue(f.read() , __lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
__UpperCAmelCase : List[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> Tuple:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , __lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , __lowerCamelCase ) , )
# Copy consistency with a really long name
__UpperCAmelCase : Optional[Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , f'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , __lowerCamelCase , __lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , __lowerCamelCase , overwrite_result=re.sub("DDPM" , "Test" , __lowerCamelCase ) , )
| 342 | from __future__ import annotations
from math import pi
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
_snake_case = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class _snake_case ( _lowercase ):
lowerCamelCase__: int = VOCAB_FILES_NAMES
lowerCamelCase__: Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__: Any = ["input_ids", "attention_mask"]
lowerCamelCase__: List[str] = GPTaTokenizer
def __init__( self: Tuple , __lowerCamelCase: int=None , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: List[str]=None , __lowerCamelCase: Optional[int]="<|endoftext|>" , __lowerCamelCase: Tuple="<|endoftext|>" , __lowerCamelCase: Any="<|endoftext|>" , __lowerCamelCase: str=False , **__lowerCamelCase: Optional[Any] , ) -> Dict:
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_bos_token" , __lowerCamelCase )
__UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
__UpperCAmelCase : Any = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
__UpperCAmelCase : str = add_prefix_space
__UpperCAmelCase : Optional[int] = pre_tok_class(**__lowerCamelCase )
__UpperCAmelCase : int = add_prefix_space
def _lowerCamelCase ( self: Tuple , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Dict ) -> BatchEncoding:
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Any , *__lowerCamelCase: str , **__lowerCamelCase: Any ) -> BatchEncoding:
__UpperCAmelCase : Dict = kwargs.get("is_split_into_words" , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase : Dict = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def _lowerCamelCase ( self: int , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [self.eos_token_id] )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : str = input_ids[-self.model_max_length :]
return input_ids
| 342 | import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
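# Explanatory note (not part of the original code): data flow of the residual block above is
# GroupNorm -> swish -> 3x3 conv, add the projected (swish-activated) time embedding broadcast
# over the spatial dimensions, then GroupNorm -> swish -> dropout -> 3x3 conv, and finally a
# residual add, routed through an optional 1x1 convolution when `use_nin_shortcut` is set or,
# by default, when the input and output channel counts differ.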
| 342 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Union[str, Any]=32 , __lowerCamelCase: List[Any]=2 , __lowerCamelCase: Tuple=3 , __lowerCamelCase: str=16 , __lowerCamelCase: Union[str, Any]=[32, 64, 1_28] , __lowerCamelCase: List[Any]=[1, 2, 1] , __lowerCamelCase: Dict=[2, 2, 4] , __lowerCamelCase: Dict=2 , __lowerCamelCase: Any=2.0 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: str=0.0 , __lowerCamelCase: Any=0.0 , __lowerCamelCase: str=0.1 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Any=False , __lowerCamelCase: str=True , __lowerCamelCase: int=0.02 , __lowerCamelCase: Optional[Any]=1e-5 , __lowerCamelCase: List[str]=True , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Optional[Any]=10 , __lowerCamelCase: Optional[int]=8 , __lowerCamelCase: Optional[int]=["stage1", "stage2"] , __lowerCamelCase: List[str]=[1, 2] , ) -> List[str]:
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : Any = image_size
__UpperCAmelCase : Optional[int] = patch_size
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : Dict = embed_dim
__UpperCAmelCase : Tuple = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Tuple = num_heads
__UpperCAmelCase : List[Any] = window_size
__UpperCAmelCase : Tuple = mlp_ratio
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : int = drop_path_rate
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Dict = use_absolute_embeddings
__UpperCAmelCase : Tuple = patch_norm
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : int = is_training
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Any = use_labels
__UpperCAmelCase : Optional[int] = type_sequence_label_size
__UpperCAmelCase : str = encoder_stride
__UpperCAmelCase : Any = out_features
__UpperCAmelCase : Any = out_indices
def _lowerCamelCase ( self: Optional[Any] ) -> Any:
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Tuple ) -> Any:
__UpperCAmelCase : Optional[int] = FocalNetModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
__UpperCAmelCase : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCamelCase ( self: int , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] ) -> str:
__UpperCAmelCase : str = FocalNetBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Dict = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__UpperCAmelCase : Any = None
__UpperCAmelCase : Optional[Any] = FocalNetBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] ) -> Tuple:
__UpperCAmelCase : int = FocalNetForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase : str = 1
__UpperCAmelCase : Optional[int] = FocalNetForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : str = self.type_sequence_label_size
__UpperCAmelCase : List[str] = FocalNetForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Any = FocalNetForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : str = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = config_and_inputs
__UpperCAmelCase : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase__: List[str] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__: List[str] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__: List[str] = False
lowerCamelCase__: Any = False
lowerCamelCase__: Dict = False
lowerCamelCase__: Union[str, Any] = False
lowerCamelCase__: List[str] = False
def _lowerCamelCase ( self: Tuple ) -> Dict:
__UpperCAmelCase : List[str] = FocalNetModelTester(self )
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=37 , has_text_modality=__lowerCamelCase )
def _lowerCamelCase ( self: List[str] ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> Any:
return
def _lowerCamelCase ( self: Optional[Any] ) -> str:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Dict:
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> Any:
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def _lowerCamelCase ( self: Dict ) -> Tuple:
pass
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _lowerCamelCase ( self: Any ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
__UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : str = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Dict , __lowerCamelCase: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Tuple ) -> Any:
__UpperCAmelCase : Optional[int] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : Union[str, Any] = outputs.hidden_states
__UpperCAmelCase : Tuple = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# FocalNet has a different seq_length
__UpperCAmelCase : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = reshaped_hidden_states[0].shape
__UpperCAmelCase : int = (
reshaped_hidden_states[0].view(__lowerCamelCase , __lowerCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : Dict = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = 3
__UpperCAmelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__UpperCAmelCase : int = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width) )
@slow
def _lowerCamelCase ( self: Optional[int] ) -> int:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[str] = FocalNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[Any] = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
# TODO update organization
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: Optional[int] ) -> str:
__UpperCAmelCase : List[Any] = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(__lowerCamelCase )
__UpperCAmelCase : List[Any] = self.default_image_processor
__UpperCAmelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__UpperCAmelCase : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : int = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : Dict = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[int] = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase__: Dict = FocalNetConfig
lowerCamelCase__: List[str] = False
def _lowerCamelCase ( self: Optional[int] ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = FocalNetModelTester(self )
| 342 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : Optional[int] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(__lowerCamelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
| 342 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
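# Lazy import structure for CLAP: the torch-dependent modules listed below are only
# imported when first accessed, via _LazyModule.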
_snake_case = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
_snake_case = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 342 |
import argparse
import struct
import unittest
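# Pure-Python SHA-256 implementation; the unit test below checks its digests against hashlib.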
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: bytes ) -> None:
__UpperCAmelCase : Tuple = data
        # Initialize hash values (first 32 bits of the fractional parts of the square roots of the first 8 primes)
__UpperCAmelCase : Any = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
        # Initialize round constants (first 32 bits of the fractional parts of the cube roots of the first 64 primes)
__UpperCAmelCase : Dict = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
__UpperCAmelCase : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: bytes ) -> bytes:
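        # SHA-256 padding: append a single 0x80 byte, then zero bytes until the length is
        # 56 mod 64, then the original message length in bits as a 64-bit big-endian integer.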
__UpperCAmelCase : List[str] = B"\x80" + (B"\x00" * (63 - (len(__lowerCamelCase ) + 8) % 64))
__UpperCAmelCase : int = struct.pack(">Q" , (len(__lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def _lowerCamelCase ( self: Dict ) -> None:
        # Split the padded message into 64-byte (512-bit) blocks
__UpperCAmelCase : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : List[str] = list(struct.unpack(">16L" , __lowerCamelCase ) )
            # extend with 48 zero words so the message schedule holds 64 entries
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
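            # 64 rounds per block: extend the 16 block words into a 64-word message schedule,
            # then update the working variables a-h with the compression function.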
for index in range(0 , 64 ):
if index > 15:
                    # fill in words 16-63 of the message schedule from earlier words
__UpperCAmelCase : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : str = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
__UpperCAmelCase : Union[str, Any] = self.ror(__lowerCamelCase , 6 ) ^ self.ror(__lowerCamelCase , 11 ) ^ self.ror(__lowerCamelCase , 25 )
__UpperCAmelCase : Tuple = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
__UpperCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase : List[Any] = self.ror(__lowerCamelCase , 2 ) ^ self.ror(__lowerCamelCase , 13 ) ^ self.ror(__lowerCamelCase , 22 )
__UpperCAmelCase : Dict = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
)
__UpperCAmelCase : Optional[int] = [a, b, c, d, e, f, g, h]
            # Add the compressed chunk's working variables to the current hash values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : int = "".join([hex(__lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: int ) -> int:
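        # Right-rotate a 32-bit value by `rotations` bits (circular shift used in the compression rounds).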
return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: List[Any] ) -> None:
import hashlib
__UpperCAmelCase : Dict = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__lowerCamelCase ).hash , hashlib.shaaaa(__lowerCamelCase ).hexdigest() )
def _UpperCamelCase ( ) -> None:
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file" )
__UpperCAmelCase : List[Any] = parser.parse_args()
__UpperCAmelCase : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb" ) as f:
__UpperCAmelCase : List[str] = f.read()
else:
__UpperCAmelCase : List[Any] = bytes(snake_case__, "utf-8" )
print(SHAaaa(snake_case__ ).hash )
if __name__ == "__main__":
main()
| 342 | 1 |