import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # torch.Generator with an "mps" device is not supported, so fall back to global seeding
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()

        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)

        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""Count negative numbers in a grid whose rows and columns are sorted in
decreasing order, comparing a binary-search approach against brute force."""


def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000 x 1000 grid with rows and columns sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a sorted (decreasing) array.

    >>> find_negative_index([4, 3, 2, -1])
    3
    >>> find_negative_index([1, 0, -1, -2])
    2
    >>> find_negative_index([1, 0])
    2
    >>> find_negative_index([])
    0
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
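
# Note on `find_negative_index` (added for clarity): because each row is sorted
# in decreasing order, the negatives always form a suffix of the row.  The
# binary search returns the index of the first negative element, or
# `len(array)` when the row has none, so `len(array) - find_negative_index(array)`
# is the count of negatives in that row.  For example, it returns 3 on
# [4, 3, 2, -1] and 2 on [1, 0].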


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count the negatives using binary search on each row.

    Since the columns are also sorted in decreasing order, the first negative
    index can only move left (or stay) as we descend the rows, so `bound`
    shrinks monotonically.
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count the negatives by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count the negatives row by row, stopping at the first negative in each sorted row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting strategies on the large generated grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
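

# Illustrative usage (a sketch added for clarity, not part of the original
# module): constructing the composite config with all defaults wires the three
# sub-configs together exactly as `__init__` does above.
#
#     config = InstructBlipConfig()            # empty dicts -> default sub-configs
#     config.text_config.model_type            # "opt", the default text backbone
#     config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size  # True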
"""Implementation of an OR gate (boolean algebra)."""


def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values.

    >>> or_gate(0, 0)
    0
    >>> or_gate(0, 1)
    1
    >>> or_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(1) != 0)
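
# Implementation note (added for clarity): `(input_1, input_2).count(1)` counts
# how many of the two inputs equal 1, so the comparison `!= 0` is True exactly
# when at least one input is 1 -- the definition of OR for 0/1 inputs.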


def test_or_gate() -> None:
    """Test the or_gate function against the full truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect every id that decodes cleanly and round-trips through `encode`
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks

        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
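
    # Note (added for clarity): `get_clean_sequence` overrides the common-test
    # helper because ByT5 has no conventional vocabulary -- ids map to raw
    # bytes, and many single bytes are not valid UTF-8 on their own.  The
    # filtering above keeps only ids whose decoded form is printable ASCII and
    # survives an encode/decode round trip, so the common tests get clean text.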

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Calculate the entropy of the softmax distribution over a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
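

# Why `log(A) - B / A` is the softmax entropy (a short derivation, added for
# clarity): with p_i = exp(x_i) / A, the entropy is
#   H = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log(A))
#     = log(A) - sum_i p_i * x_i = log(A) - B / A.
# For a uniform row of n logits this gives log(n), e.g. entropy(torch.zeros(1, 4))
# is log(4) ~= 1.3863.  Note that exponentiating raw logits can overflow for
# large values; the code above does not apply the usual max-subtraction trick.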


class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])

        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # accept either a single threshold for every layer or one threshold per layer
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        # copy the main model's pooler weights into every highway pooler
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
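

# Design note (added for clarity): at inference time the encoder implements
# early exiting through exception-based control flow.  When a highway
# classifier's entropy drops below its per-layer threshold, `HighwayException`
# is raised and caught by the wrapping head (see
# `DeeBertForSequenceClassification.forward` below), which recovers the partial
# outputs from `e.message` and the exit depth from `e.exit_layer`.  During
# training no exception is raised, and every highway exit is collected so that
# all of them can be supervised.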
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune heads of the model.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to
    (cross-entropy computation in BertForSequenceClassification)."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an early exit fired inside the encoder; recover its partial outputs
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, 2, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
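

# Data layout note (added for clarity): every story has two candidate
# continuations, so each example becomes a pair of token sequences of shape
# (n_batch, 2, input_len).  `mc_token_ids` points at the `clf_token` position
# used by the multiple-choice head, `lm_labels` mirrors `input_ids` with -100
# in the padded tail so those positions are ignored by the LM loss, and
# `mc_labels` holds the index (0 or 1) of the correct continuation.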
def __UpperCamelCase ( ) -> List[Any]:
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_name', type=_lowercase, default='openai-gpt', help='pretrained model name' )
parser.add_argument('--do_train', action='store_true', help='Whether to run training.' )
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir', default=_lowercase, type=_lowercase, required=_lowercase, help='The output directory where the model predictions and checkpoints will be written.', )
parser.add_argument('--train_dataset', type=_lowercase, default='' )
parser.add_argument('--eval_dataset', type=_lowercase, default='' )
parser.add_argument('--seed', type=_lowercase, default=42 )
parser.add_argument('--num_train_epochs', type=_lowercase, default=3 )
parser.add_argument('--train_batch_size', type=_lowercase, default=8 )
parser.add_argument('--eval_batch_size', type=_lowercase, default=16 )
parser.add_argument('--adam_epsilon', default=1E-8, type=_lowercase, help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm', type=_lowercase, default=1 )
parser.add_argument(
'--max_steps', default=-1, type=_lowercase, help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
), )
parser.add_argument(
'--gradient_accumulation_steps', type=_lowercase, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.', )
parser.add_argument('--learning_rate', type=_lowercase, default=6.2_5E-5 )
parser.add_argument('--warmup_steps', default=0, type=_lowercase, help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule', type=_lowercase, default='warmup_linear' )
parser.add_argument('--weight_decay', type=_lowercase, default=0.0_1 )
parser.add_argument('--lm_coef', type=_lowercase, default=0.9 )
parser.add_argument('--n_valid', type=_lowercase, default=374 )
parser.add_argument('--server_ip', type=_lowercase, default='', help='Can be used for distant debugging.' )
parser.add_argument('--server_port', type=_lowercase, default='', help='Can be used for distant debugging.' )
_lowercase : Dict = parser.parse_args()
print(_lowercase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=_lowercase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_lowercase : int = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_lowercase : Union[str, Any] = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(_lowercase, _lowercase ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_lowercase : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
_lowercase : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_lowercase )
_lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
_lowercase : Tuple = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_lowercase ) )
model.to(_lowercase )
# Load and encode the datasets
def tokenize_and_encode(_lowercase ):
if isinstance(_lowercase, _lowercase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowercase ) )
elif isinstance(_lowercase, _lowercase ):
return obj
return [tokenize_and_encode(_lowercase ) for o in obj]
logger.info('Encoding dataset...' )
_lowercase : Any = load_rocstories_dataset(args.train_dataset )
_lowercase : List[str] = load_rocstories_dataset(args.eval_dataset )
_lowercase : Dict = (train_dataset, eval_dataset)
_lowercase : Optional[int] = tokenize_and_encode(_lowercase )
# Compute the max input length for the Transformer
_lowercase : Optional[Any] = model.config.n_positions // 2 - 2
    _lowercase : List[Any] = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ), len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
_lowercase : List[str] = min(_lowercase, model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_lowercase : Optional[int] = pre_process_datasets(_lowercase, _lowercase, _lowercase, *_lowercase )
    _lowercase , _lowercase : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
_lowercase : Any = TensorDataset(*_lowercase )
_lowercase : Optional[Any] = RandomSampler(_lowercase )
_lowercase : Union[str, Any] = DataLoader(_lowercase, sampler=_lowercase, batch_size=args.train_batch_size )
_lowercase : Optional[int] = TensorDataset(*_lowercase )
_lowercase : List[Any] = SequentialSampler(_lowercase )
_lowercase : Optional[Any] = DataLoader(_lowercase, sampler=_lowercase, batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_lowercase : Tuple = args.max_steps
_lowercase : List[str] = args.max_steps // (len(_lowercase ) // args.gradient_accumulation_steps) + 1
else:
_lowercase : Dict = len(_lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs
_lowercase : Optional[int] = list(model.named_parameters() )
_lowercase : Any = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
_lowercase : Tuple = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
_lowercase : Tuple = AdamW(_lowercase, lr=args.learning_rate, eps=args.adam_epsilon )
_lowercase : Optional[int] = get_linear_schedule_with_warmup(
_lowercase, num_warmup_steps=args.warmup_steps, num_training_steps=_lowercase )
if args.do_train:
        _lowercase , _lowercase , _lowercase : int = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ), desc='Epoch' ):
_lowercase : Optional[Any] = 0
_lowercase : Union[str, Any] = 0
_lowercase : Dict = tqdm(_lowercase, desc='Training' )
for step, batch in enumerate(_lowercase ):
_lowercase : Dict = tuple(t.to(_lowercase ) for t in batch )
_lowercase : Dict = batch
_lowercase : Optional[Any] = model(_lowercase, mc_token_ids=_lowercase, lm_labels=_lowercase, mc_labels=_lowercase )
_lowercase : Any = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_lowercase : str = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_lowercase : Dict = 'Training loss: {:.2e} lr: {:.2e}'.format(_lowercase, scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_lowercase : List[str] = model.module if hasattr(_lowercase, 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_lowercase : Optional[int] = os.path.join(args.output_dir, _lowercase )
_lowercase : List[Any] = os.path.join(args.output_dir, _lowercase )
torch.save(model_to_save.state_dict(), _lowercase )
model_to_save.config.to_json_file(_lowercase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_lowercase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_lowercase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_lowercase )
if args.do_eval:
model.eval()
        _lowercase , _lowercase : List[Any] = 0, 0
        _lowercase , _lowercase : List[str] = 0, 0
for batch in tqdm(_lowercase, desc='Evaluating' ):
_lowercase : str = tuple(t.to(_lowercase ) for t in batch )
_lowercase : Any = batch
with torch.no_grad():
_lowercase : Tuple = model(
_lowercase, mc_token_ids=_lowercase, lm_labels=_lowercase, mc_labels=_lowercase )
_lowercase : List[str] = mc_logits.detach().cpu().numpy()
_lowercase : Any = mc_labels.to('cpu' ).numpy()
_lowercase : int = accuracy(_lowercase, _lowercase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_lowercase : Tuple = eval_loss / nb_eval_steps
_lowercase : Optional[int] = eval_accuracy / nb_eval_examples
_lowercase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
_lowercase : List[Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
_lowercase : Optional[Any] = os.path.join(args.output_dir, 'eval_results.txt' )
with open(_lowercase, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s', _lowercase, str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = [10, 20, 30, 40, 50, 60]
_lowercase : Tuple = [2, 4, 6, 8, 10, 12]
_lowercase : Optional[Any] = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
        self.assertRaisesRegex(
            ValueError , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = DiTPipeline
A_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
A_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ = False
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Union[str, Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCamelCase_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=UpperCamelCase_ , )
_lowercase : Optional[Any] = AutoencoderKL()
_lowercase : Tuple = DDIMScheduler()
_lowercase : Tuple = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]=0 ) -> List[str]:
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('mps' ):
_lowercase : Any = torch.manual_seed(UpperCamelCase_ )
else:
_lowercase : Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
_lowercase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
_lowercase : Union[str, Any] = 'cpu'
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : List[str] = self.get_dummy_inputs(UpperCamelCase_ )
_lowercase : Optional[int] = pipe(**UpperCamelCase_ ).images
_lowercase : Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowercase : Tuple = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
_lowercase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def __UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=UpperCamelCase_ , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = torch.manual_seed(0 )
_lowercase : Dict = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_lowercase : List[str] = ['vase', 'umbrella', 'white shark', 'white wolf']
_lowercase : List[str] = pipe.get_label_ids(UpperCamelCase_ )
_lowercase : Optional[int] = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[str] = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
_lowercase : List[str] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_lowercase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_lowercase : Any = ['vase', 'umbrella']
_lowercase : int = pipe.get_label_ids(UpperCamelCase_ )
_lowercase : Optional[int] = torch.manual_seed(0 )
_lowercase : int = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import math
def __UpperCamelCase ( _lowercase, _lowercase ) -> float:
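    # real (active) power: P = S * cos(phi), where the power factor equals cos(phi)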
if (
not isinstance(_lowercase, (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def __UpperCamelCase ( _lowercase, _lowercase ) -> float:
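    # reactive power: Q = S * sin(phi) = S * sqrt(1 - cos(phi)**2)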
if (
not isinstance(_lowercase, (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """markuplm"""
def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
| 4 | 0 |
import math
def __UpperCamelCase ( _lowercase ) -> int:
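    # Proth numbers have the form k * 2**n + 1 with odd k < 2**n; after the base cases 3 and 5,
    # each block appends entries of the form 2**(block + 1) + previous_entry, doubling the
    # block length on every iteration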
if not isinstance(_lowercase, _lowercase ):
_lowercase : Any = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
_lowercase : Optional[int] = f'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
_lowercase : Union[str, Any] = int(math.log(number // 3, 2 ) ) + 2
_lowercase : Dict = [3, 5]
_lowercase : List[str] = 2
_lowercase : str = 3
for block in range(1, _lowercase ):
for _ in range(_lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
_A : int =0
try:
_A : Union[str, Any] =proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( _lowercase ) -> Tuple:
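    # infers the Swin2SR configuration (upscale factor, embed dim, depths, upsampler variant)
    # from the naming scheme of the checkpoint URL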
_lowercase : Tuple = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowercase : Tuple = 4
_lowercase : Union[str, Any] = 48
_lowercase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : Dict = [6, 6, 6, 6]
_lowercase : Optional[int] = 60
_lowercase : List[str] = [6, 6, 6, 6]
_lowercase : Dict = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : str = 4
_lowercase : str = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowercase : str = 1
_lowercase : Tuple = 1
_lowercase : Dict = 126
_lowercase : Optional[int] = 7
_lowercase : List[Any] = 2_5_5.0
_lowercase : Tuple = ''
return config
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
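    # translates a parameter name from the original Swin2SR repository to the naming scheme
    # used by the transformers implementation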
if "patch_embed.proj" in name and "layers" not in name:
_lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
_lowercase : Tuple = name.replace('layers', 'encoder.stages' )
if "residual_group.blocks" in name:
_lowercase : str = name.replace('residual_group.blocks', 'layers' )
if "attn.proj" in name:
_lowercase : str = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
_lowercase : List[Any] = name.replace('attn', 'attention.self' )
if "norm1" in name:
_lowercase : List[str] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
_lowercase : Tuple = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' )
if "q_bias" in name:
_lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' )
if "k_bias" in name:
_lowercase : str = name.replace('k_bias', 'key.bias' )
if "v_bias" in name:
_lowercase : int = name.replace('v_bias', 'value.bias' )
if "cpb_mlp" in name:
_lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' )
if name == "norm.weight":
_lowercase : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
_lowercase : List[Any] = 'layernorm.bias'
if "conv_first" in name:
_lowercase : Tuple = name.replace('conv_first', 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowercase : List[str] = name.replace('conv_last', 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
if "upsample.0" in name:
_lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' )
if "upsample.2" in name:
_lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' )
_lowercase : Optional[int] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' )
_lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' )
else:
pass
else:
_lowercase : Tuple = 'swin2sr.' + name
return name
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
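    # splits every fused qkv projection in the original checkpoint into separate
    # query/key/value weights and biases; all other keys are simply renamed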
for key in orig_state_dict.copy().keys():
_lowercase : int = orig_state_dict.pop(_lowercase )
if "qkv" in key:
_lowercase : Tuple = key.split('.' )
_lowercase : Optional[Any] = int(key_split[1] )
_lowercase : Any = int(key_split[4] )
_lowercase : Optional[Any] = config.embed_dim
if "weight" in key:
_lowercase : Optional[int] = val[:dim, :]
_lowercase : int = val[dim : dim * 2, :]
_lowercase : int = val[-dim:, :]
else:
_lowercase : Optional[Any] = val[:dim]
_lowercase : Tuple = val[dim : dim * 2]
_lowercase : List[str] = val[-dim:]
pass
else:
_lowercase : List[Any] = val
return orig_state_dict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Optional[Any] = get_config(_lowercase )
_lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
_lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
_lowercase : Any = convert_state_dict(_lowercase, _lowercase )
_lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase )
if len(_lowercase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
_lowercase : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
_lowercase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
_lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowercase : Optional[int] = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 512, 512] )
_lowercase : Tuple = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
_lowercase : int = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
_lowercase : Dict = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : List[str] = torch.Size([1, 3, 512, 512] )
_lowercase : int = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 1024, 1024] )
_lowercase : Union[str, Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
print('Looks ok!' )
_lowercase : List[str] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_lowercase : int = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_A : Optional[int] =logging.get_logger(__name__)
_A : Optional[int] ={
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """marian"""
A_ = ["""past_key_values"""]
A_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Any , UpperCamelCase_ : Union[str, Any]=5_8101 , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : str=1024 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : str=4096 , UpperCamelCase_ : Optional[int]=16 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : Union[str, Any]=4096 , UpperCamelCase_ : Tuple=16 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.0 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : str=5_8100 , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Union[str, Any]=5_8100 , UpperCamelCase_ : int=0 , UpperCamelCase_ : str=0 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Any , ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = vocab_size
_lowercase : Tuple = decoder_vocab_size or vocab_size
_lowercase : Optional[int] = max_position_embeddings
_lowercase : Tuple = d_model
_lowercase : Optional[Any] = encoder_ffn_dim
_lowercase : Optional[Any] = encoder_layers
_lowercase : List[Any] = encoder_attention_heads
_lowercase : int = decoder_ffn_dim
_lowercase : List[Any] = decoder_layers
_lowercase : List[Any] = decoder_attention_heads
_lowercase : Tuple = dropout
_lowercase : int = attention_dropout
_lowercase : str = activation_dropout
_lowercase : int = activation_function
_lowercase : Any = init_std
_lowercase : Dict = encoder_layerdrop
_lowercase : Optional[Any] = decoder_layerdrop
_lowercase : List[Any] = use_cache
_lowercase : List[str] = encoder_layers
_lowercase : int = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase : int = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __UpperCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowercase : List[str] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase : Tuple = {0: 'batch'}
_lowercase : List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase : Tuple = {0: 'batch', 1: 'decoder_sequence'}
_lowercase : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase : List[Any] = self.num_layers
for i in range(UpperCamelCase_ ):
_lowercase : str = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowercase : List[str] = super().outputs
else:
_lowercase : int = super(UpperCamelCase_ , self ).outputs
if self.use_past:
_lowercase : str = self.num_layers
for i in range(UpperCamelCase_ ):
_lowercase : Any = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase : Any = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
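        # builds dummy encoder and decoder inputs; when use_past is enabled, also fabricates
        # zero-filled past_key_values tensors with the shapes the exported model expects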
_lowercase : str = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Generate decoder inputs
_lowercase : Any = seq_length if not self.use_past else 1
_lowercase : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : Optional[int] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowercase : str = dict(**UpperCamelCase_ , **UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase : Optional[Any] = common_inputs['input_ids'].shape
_lowercase : List[str] = common_inputs['decoder_input_ids'].shape[1]
_lowercase : Tuple = self.num_attention_heads
_lowercase : List[str] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase : List[Any] = decoder_seq_length + 3
_lowercase : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase : Any = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 )
_lowercase : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase : Dict = self.num_layers
_lowercase : Dict = min(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers
_lowercase : Union[str, Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
) )
# TODO: test this.
_lowercase : Dict = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase_ , UpperCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
return common_inputs
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_lowercase : int = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase : Any = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase : Any = seqlen + 2
_lowercase : Union[str, Any] = self.num_layers
_lowercase : List[Any] = self.num_attention_heads
_lowercase : Dict = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase : Union[str, Any] = common_inputs['attention_mask'].dtype
_lowercase : Union[str, Any] = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
_lowercase : Optional[Any] = [
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
]
return common_inputs
def __UpperCAmelCase ( self : int , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_lowercase : Optional[Any] = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase : int = tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
_lowercase : int = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
_lowercase : Optional[Any] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase : List[str] = dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
return common_inputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowercase : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
else:
_lowercase : Dict = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
return common_inputs
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str ) -> Tuple:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowercase : Tuple = super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
_lowercase : int = super(UpperCamelCase_ , self )._flatten_past_key_values_(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@property
def __UpperCAmelCase ( self : List[Any] ) -> float:
'''simple docstring'''
return 1E-4
| 720 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> list:
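    # greedy full justification: pack as many words as fit into max_width, distribute the
    # leftover spaces round-robin from the left, and left-justify the final line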
_lowercase : List[str] = word.split()
def justify(_lowercase, _lowercase, _lowercase ) -> str:
_lowercase : Dict = max_width - width
_lowercase : Tuple = len(_lowercase )
if len(_lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_lowercase : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_lowercase : str = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_lowercase : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_lowercase ):
num_spaces_between_words_list[i] += 1
_lowercase : Union[str, Any] = []
for i in range(_lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowercase )
_lowercase : str = []
_lowercase : list[str] = []
_lowercase : Union[str, Any] = 0
for word in words:
if width + len(_lowercase ) + len(_lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowercase )
width += len(_lowercase )
else:
# justify the line and add it to result
answer.append(justify(_lowercase, _lowercase, _lowercase ) )
# reset new line and new width
_lowercase , _lowercase : Optional[Any] = [word], len(_lowercase )
_lowercase : Optional[int] = max_width - width - len(_lowercase )
answer.append(' '.join(_lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 4 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCamelCase__ :
'''simple docstring'''
def __UpperCAmelCase ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> List[str]:
'''simple docstring'''
return None
class lowerCamelCase__ :
'''simple docstring'''
def __UpperCAmelCase ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return None
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
A_ = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase_ , 'tf' , 12 , **UpperCamelCase_ )
@require_torch
@slow
def __UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase_ , 'pt' , 12 , **UpperCamelCase_ )
@require_torch
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
from transformers import BertModel
_lowercase : Optional[Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(UpperCamelCase_ ) )
vocab_file.flush()
_lowercase : Optional[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_lowercase : Any = BertModel(BertConfig(vocab_size=len(UpperCamelCase_ ) ) )
model.save_pretrained(UpperCamelCase_ )
self._test_export(UpperCamelCase_ , 'pt' , 12 , UpperCamelCase_ )
@require_tf
@slow
def __UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowercase : Dict = self._test_export(UpperCamelCase_ , 'tf' , 12 , **UpperCamelCase_ )
_lowercase : str = quantize(Path(UpperCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def __UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowercase : int = self._test_export(UpperCamelCase_ , 'pt' , 12 , **UpperCamelCase_ )
_lowercase : List[Any] = quantize(UpperCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Any=None , **UpperCamelCase_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
_lowercase : int = Path(UpperCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
return path
except Exception as e:
self.fail(UpperCamelCase_ )
@require_torch
@require_tokenizers
@slow
def __UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
from transformers import BertModel
_lowercase : List[str] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_lowercase : List[str] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(UpperCamelCase_ , UpperCamelCase_ , 'pt' )
@require_tf
@require_tokenizers
@slow
def __UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
from transformers import TFBertModel
_lowercase : Any = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
_lowercase : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(UpperCamelCase_ , UpperCamelCase_ , 'tf' )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Any ) -> List[Any]:
'''simple docstring'''
_lowercase : str = FeatureExtractionPipeline(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : str = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
_lowercase : Dict = infer_shapes(UpperCamelCase_ , UpperCamelCase_ )
# Assert all variables are present
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , UpperCamelCase_ )
self.assertSequenceEqual(variable_names[3:] , UpperCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
_lowercase : Tuple = ['input_ids', 'attention_mask', 'token_type_ids']
_lowercase : str = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
_lowercase : str = ensure_valid_input(FuncContiguousArgs() , UpperCamelCase_ , UpperCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(UpperCamelCase_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(UpperCamelCase_ ) , set(UpperCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(UpperCamelCase_ , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_lowercase : str = ensure_valid_input(FuncNonContiguousArgs() , UpperCamelCase_ , UpperCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(UpperCamelCase_ ) , 1 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
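    # yields every .py / .ipynb file under the given directory, skipping the scripts/
    # directory, hidden/private directories, and __init__.py files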
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
def __UpperCamelCase ( _lowercase ) -> List[str]:
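    # markdown prefix for a given depth: a top-level '##' heading at depth 0,
    # an indented '*' bullet otherwise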
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _A['''tokenization_reformer'''] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _A['''tokenization_reformer_fast'''] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _A['''modeling_reformer'''] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _A, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
def pigeonhole_sort(a) -> None:
    '''simple docstring'''
    min_val = min(a )  # min() finds the minimum value
    max_val = max(a )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is the difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in sorted order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    '''simple docstring'''
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a )
    print('Sorted order is:', ' '.join(str(x ) for x in a ) )
if __name__ == "__main__":
main()
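# Quick sanity check of the in-place sort above; pigeonhole sort runs in
# O(n + value_range) time, so it only pays off for narrow value ranges.
if __name__ == "__main__":
    demo = [5, 1, 4, 1]
    pigeonhole_sort(demo)
    assert demo == [1, 1, 4, 5]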
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
        _lowercase : Union[str, Any] = 50_000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : int = 'x = 3'
_lowercase : str = {}
_lowercase : Union[str, Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
assert result == 3
self.assertDictEqual(UpperCamelCase_ , {'x': 3} )
_lowercase : str = 'x = y'
_lowercase : int = {'y': 5}
_lowercase : Optional[int] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase_ , {'x': 5, 'y': 5} )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : str = 'y = add_two(x)'
_lowercase : List[Any] = {'x': 3}
_lowercase : int = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ )
assert result == 5
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
_lowercase : Dict = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def __UpperCAmelCase ( self : str ) -> Any:
'''simple docstring'''
_lowercase : Dict = 'x = 3'
_lowercase : Any = {}
_lowercase : List[Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
assert result == 3
self.assertDictEqual(UpperCamelCase_ , {'x': 3} )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_lowercase : List[str] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
_lowercase : Optional[int] = {'x': 3}
_lowercase : List[str] = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Optional[Any] = 'x = 3\ny = 5'
_lowercase : Any = {}
_lowercase : int = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 5} )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_lowercase : List[str] = 'text = f\'This is x: {x}.\''
_lowercase : str = {'x': 3}
_lowercase : Dict = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_lowercase : Tuple = 'if x <= 3:\n y = 2\nelse:\n y = 5'
_lowercase : Tuple = {'x': 3}
_lowercase : Optional[Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 2} )
_lowercase : Any = {'x': 8}
_lowercase : List[Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCamelCase_ , {'x': 8, 'y': 5} )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Tuple = 'test_list = [x, add_two(x)]'
_lowercase : Tuple = {'x': 3}
_lowercase : Union[str, Any] = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , [3, 5] )
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'test_list': [3, 5]} )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Tuple = 'y = x'
_lowercase : Dict = {'x': 3}
_lowercase : Optional[Any] = evaluate(UpperCamelCase_ , {} , state=UpperCamelCase_ )
assert result == 3
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'y': 3} )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[str] = 'test_list = [x, add_two(x)]\ntest_list[1]'
_lowercase : int = {'x': 3}
_lowercase : Any = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ )
assert result == 5
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'test_list': [3, 5]} )
_lowercase : int = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
_lowercase : Optional[Any] = {'x': 3}
_lowercase : Dict = evaluate(UpperCamelCase_ , {'add_two': add_two} , state=UpperCamelCase_ )
assert result == 5
self.assertDictEqual(UpperCamelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def __UpperCAmelCase ( self : Any ) -> str:
'''simple docstring'''
_lowercase : List[str] = 'x = 0\nfor i in range(3):\n x = i'
_lowercase : int = {}
_lowercase : Dict = evaluate(UpperCamelCase_ , {'range': range} , state=UpperCamelCase_ )
assert result == 2
self.assertDictEqual(UpperCamelCase_ , {'x': 2, 'i': 2} )
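# Standalone sketch of the interpreter under test; the signature inferred from
# the calls above is evaluate(code, tools, state=...), which returns the value
# of the last statement and mutates `state` in place.
if __name__ == "__main__":
    demo_state: dict = {}
    demo_result = evaluate('x = 1\ny = x + 1', {}, state=demo_state)
    assert demo_result == 2
    assert demo_state == {'x': 1, 'y': 2}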
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
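# Typical driving code for this extractor (sketch; the class appears to be the
# CLAP feature extractor, exported upstream as `ClapFeatureExtractor` -- that
# name is an assumption here). Defaults above: 48 kHz input, 10 s max length,
# "fusion" truncation.
#
#   import numpy as np
#   from transformers import ClapFeatureExtractor
#   extractor = ClapFeatureExtractor()
#   audio = np.zeros(3 * 48_000, dtype=np.float32)  # 3 s of silent mono audio
#   features = extractor(audio, sampling_rate=48_000, return_tensors='np')
#   print(features['input_features'].shape, features['is_longer'])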
| 4 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_A : List[str] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""pixel_values"""]
def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : float = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 255 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : str , ) -> None:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Union[str, Any] = size if size is not None else {'shortest_edge': 384}
_lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
_lowercase : Any = do_resize
_lowercase : List[str] = size
# Default value set here for backwards compatibility where the value in config is None
_lowercase : Dict = crop_pct if crop_pct is not None else 224 / 256
_lowercase : Tuple = resample
_lowercase : str = do_rescale
_lowercase : str = rescale_factor
_lowercase : List[str] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : float , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : str , ) -> np.ndarray:
'''simple docstring'''
_lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
_lowercase : str = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_lowercase : List[Any] = int(shortest_edge / crop_pct )
_lowercase : Tuple = get_resize_output_image_size(UpperCamelCase_ , size=UpperCamelCase_ , default_to_square=UpperCamelCase_ )
_lowercase : Union[str, Any] = resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=UpperCamelCase_ , size=(shortest_edge, shortest_edge) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
UpperCamelCase_ , size=(shortest_edge, shortest_edge) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ) -> Dict:
'''simple docstring'''
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : float = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : int , ) -> PIL.Image.Image:
'''simple docstring'''
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : int = crop_pct if crop_pct is not None else self.crop_pct
_lowercase : Dict = resample if resample is not None else self.resample
_lowercase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : Union[str, Any] = image_std if image_std is not None else self.image_std
_lowercase : str = size if size is not None else self.size
_lowercase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
_lowercase : Any = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
_lowercase : int = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , crop_pct=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
_lowercase : List[str] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
_lowercase : Any = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
_lowercase : Optional[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
_lowercase : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
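# Sketch of the preprocessing entry point (the resize/crop_pct logic matches the
# ConvNeXt-style processors in transformers; `ConvNextImageProcessor` as the
# concrete export name is an assumption):
#
#   import numpy as np
#   from PIL import Image
#   from transformers import ConvNextImageProcessor
#   processor = ConvNextImageProcessor()
#   image = Image.fromarray(np.zeros((512, 384, 3), dtype=np.uint8))
#   batch = processor(image, return_tensors='np')
#   print(batch['pixel_values'].shape)  # typically (1, 3, 384, 384)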
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url ).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join('* [{title}]({url})'.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
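# Shape of the rendered output, one markdown bullet per story (title and URL
# here are illustrative; the real values come from the live Hacker News API,
# so running the module requires network access):
#
#   * [Show HN: An example story](https://example.com/story)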
| 4 | 0 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = ''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = B'=' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = B''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
            for index in range(0, len(binary_stream ), 6 ) ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes ) and not isinstance(encoded_data, str ):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes ):
        try:
            encoded_data = encoded_data.decode('utf-8' )
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters' )
    padding = encoded_data.count('=' )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    data = [
        int(binary_stream[index : index + 8], 2 )
        for index in range(0, len(binary_stream ), 8 )
    ]
    return bytes(data )
if __name__ == "__main__":
import doctest
doctest.testmod()
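# Round-trip check against a known vector ("hello" encodes to b"aGVsbG8="):
if __name__ == "__main__":
    assert base64_encode(b'hello') == b'aGVsbG8='
    assert base64_decode(b'aGVsbG8=') == b'hello'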
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
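# Instantiation sketch (values mirror the defaults above; `MegatronBertConfig`
# is the upstream export this class corresponds to):
#
#   from transformers import MegatronBertConfig
#   config = MegatronBertConfig()  # vocab_size=29056, 24 layers, 16 heads
#   assert config.model_type == 'megatron-bert'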
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A : List[str] ={
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _A['''modeling_rag'''] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _A['''modeling_tf_rag'''] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _A, module_spec=__spec__)
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path
    print(f'''Load fine-pruned model from {model_name_or_path}''' )
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores, threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores, threshold, True )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            else:
                raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ), f'''bertarized_{os.path.basename(model_name_or_path )}''' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path, target_model_path )
        print(f'''\nCreated folder {target_model_path}''' )
    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
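# Conceptual sketch of the `topK` branch above (plain torch, not the emmental
# API): keep the `threshold` fraction of highest-scoring weights, zero the rest.
#
#   def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
#       flat = scores.flatten()
#       k = max(1, int(keep_ratio * flat.numel()))
#       cutoff = flat.kthvalue(flat.numel() - k + 1).values  # k-th largest score
#       return (scores >= cutoff).to(scores.dtype)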
| 4 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All other primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator() ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
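# Quick checks: the primes below 10 are 2, 3, 5 and 7, so solution(10) == 17.
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(1)
    assert solution(10) == 17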
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = ''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = B'=' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = B''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
            for index in range(0, len(binary_stream ), 6 ) ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes ) and not isinstance(encoded_data, str ):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes ):
        try:
            encoded_data = encoded_data.decode('utf-8' )
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters' )
    padding = encoded_data.count('=' )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    data = [
        int(binary_stream[index : index + 8], 2 )
        for index in range(0, len(binary_stream ), 8 )
    ]
    return bytes(data )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_A : Optional[int] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_A : Optional[Any] =''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
_lowercase : Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
_lowercase : List[str] = self.transformer_dir
shutil.copy(
os.path.join(UpperCamelCase_ , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=None ) -> str:
'''simple docstring'''
_lowercase : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_lowercase : Dict = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_lowercase : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowercase : Any = black.format_str(UpperCamelCase_ , mode=UpperCamelCase_ )
_lowercase : List[Any] = os.path.join(self.transformer_dir , 'new_code.py' )
with open(UpperCamelCase_ , 'w' , newline='\n' ) as f:
f.write(UpperCamelCase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase_ )
with open(UpperCamelCase_ , 'r' ) as f:
self.assertTrue(f.read() , UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
_lowercase : str = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , UpperCamelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , UpperCamelCase_ ) , )
# Copy consistency with a really long name
_lowercase : Optional[Any] = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , UpperCamelCase_ , UpperCamelCase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , UpperCamelCase_ , overwrite_result=re.sub('Bert' , 'TestModel' , UpperCamelCase_ ) , )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : List[Any] = check_copies.LOCALIZED_READMES['README_zh-hans.md']
_lowercase : Union[str, Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
_lowercase : Union[str, Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_lowercase : Union[str, Any] = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
_lowercase : List[Any] = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme['format_model_list'] )
self.assertFalse(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : str = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(UpperCamelCase_ )
_lowercase : int = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
_lowercase : int = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_lowercase : int = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_lowercase : Tuple = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 707 |
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n ) == str(n )[::-1]
def sum_reverse(n: int) -> int:
    return int(n ) + int(str(n )[::-1] )
def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit ):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a )
            iterations += 1
            if is_palindrome(a ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
print(F'''{solution() = }''')
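# 47 becomes a palindrome in one step (47 + 74 = 121), so it is not Lychrel;
# 196 is the classic suspected Lychrel seed (196 + 691 = 887, still no palindrome).
if __name__ == "__main__":
    assert is_palindrome(sum_reverse(47))
    assert not is_palindrome(sum_reverse(196))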
| 4 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path', ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path', ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    'path, config_name, expected_splits', [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ], )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception', [
        ('paws', None, ValueError),
    ], )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path, config_name=config_name )
@pytest.mark.parametrize(
    'path, expected', [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ], )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config', [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ], )
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits', [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ], )
def test_get_dataset_infos(path, expected_config, expected_splits):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception', [
        ('paws', None, ValueError),
    ], )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path, config_name=config_name )
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
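# Each line of the "correct" file is expected to look like
# "<file>;<class_name>;<test_name>;<correct_line>" (see the semicolon split in the
# driver function below); overwrite_file then rewrites that test's assertion in place.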
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 4 | 0 |
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : int = k_size // 2
_lowercase : List[Any] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowercase : Optional[int] = 1 / (2 * pi * sigma) * exp(-(square(_lowercase ) + square(_lowercase )) / (2 * square(_lowercase )) )
return g
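# gen_gaussian_kernel samples the 2-D Gaussian 1 / (2 * pi * sigma) * exp(-(x^2 + y^2) / (2 * sigma^2))
# on an integer grid centred on the middle cell. Note the kernel is not renormalised to
# sum to exactly 1, so absolute image intensities may shift slightly after filtering.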
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[int]:
    _lowercase , _lowercase : Any = image.shape[0], image.shape[1]
# dst image height and width
_lowercase : List[str] = height - k_size + 1
_lowercase : List[str] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowercase : Optional[int] = zeros((dst_height * dst_width, k_size * k_size) )
_lowercase : List[str] = 0
for i, j in product(range(_lowercase ), range(_lowercase ) ):
_lowercase : Optional[int] = ravel(image[i : i + k_size, j : j + k_size] )
_lowercase : Optional[Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowercase : Optional[Any] = gen_gaussian_kernel(_lowercase, _lowercase )
_lowercase : str = ravel(_lowercase )
# reshape and get the dst image
_lowercase : List[str] = dot(_lowercase, _lowercase ).reshape(_lowercase, _lowercase ).astype(_lowercase )
return dst
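# Shape example: no padding is applied, so a 512x512 input with k_size = 3 yields a
# 510x510 output (dst dimensions are height - k_size + 1 by width - k_size + 1).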
if __name__ == "__main__":
# read original image
_A : Optional[int] =imread(r'''../image_data/lena.jpg''')
# turn image in gray scale value
_A : Union[str, Any] =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_A : Dict =gaussian_filter(gray, 3, sigma=1)
_A : Optional[int] =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
]
return result
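# Minimal usage sketch (illustrative, not part of this module; assumes a CLIP-style
# checkpoint and a local image file):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "car"])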
| 4 | 0 |
'''simple docstring'''
import operator as op
def __UpperCamelCase ( _lowercase ) -> Optional[int]:
_lowercase : Optional[Any] = []
    _lowercase : Any = lambda x, y : int(x / y )  # noqa: E731 integer division operation
_lowercase : str = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ), 'Action'.center(12 ), 'Stack', sep=' | ' )
print('-' * (30 + len(_lowercase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_lowercase ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ('push(' + x + ')').ljust(12 ), ','.join(_lowercase ), sep=' | ' )
else:
_lowercase : Optional[int] = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ), ('pop(' + b + ')').ljust(12 ), ','.join(_lowercase ), sep=' | ' )
_lowercase : Tuple = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ), ('pop(' + a + ')').ljust(12 ), ','.join(_lowercase ), sep=' | ' )
stack.append(
str(opr[x](int(_lowercase ), int(_lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ('push(' + a + x + b + ')').ljust(12 ), ','.join(_lowercase ), sep=' | ', )
return int(stack[0] )
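# Worked example: the postfix expression "5 6 2 + * 12 4 / -" corresponds to the infix
# expression 5 * (6 + 2) - 12 / 4 and evaluates to 40 - 3 = 37, with every push and pop
# traced in the table printed above.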
if __name__ == "__main__":
_A : List[str] =input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
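# Shannon entropy: H = -sum(p_i * log2(p_i)) over symbol probabilities. calculate_prob
# prints the first-order (single-character) entropy, the second-order (character-pair)
# entropy, and their difference, which estimates the conditional entropy of a character
# given the one before it (all in bits, since base-2 logarithms are used).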
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            _lowercase : Optional[Any] = cha + chb
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __UpperCamelCase ( _lowercase ) -> Union[str, Any]:
_lowercase : int = 384
_lowercase : Union[str, Any] = 7
if "tiny" in model_name:
_lowercase : Optional[Any] = 96
_lowercase : Dict = (2, 2, 6, 2)
_lowercase : Dict = (3, 6, 12, 24)
elif "small" in model_name:
_lowercase : Union[str, Any] = 96
_lowercase : Dict = (2, 2, 18, 2)
_lowercase : Dict = (3, 6, 12, 24)
elif "base" in model_name:
_lowercase : Any = 128
_lowercase : Optional[Any] = (2, 2, 18, 2)
_lowercase : str = (4, 8, 16, 32)
_lowercase : List[Any] = 12
_lowercase : Any = 512
elif "large" in model_name:
_lowercase : List[str] = 192
_lowercase : List[Any] = (2, 2, 18, 2)
_lowercase : Union[str, Any] = (6, 12, 24, 48)
_lowercase : int = 12
_lowercase : int = 768
# set label information
_lowercase : List[Any] = 150
_lowercase : List[str] = 'huggingface/label-files'
_lowercase : str = 'ade20k-id2label.json'
_lowercase : List[Any] = json.load(open(hf_hub_download(_lowercase, _lowercase, repo_type='dataset' ), 'r' ) )
_lowercase : Optional[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
_lowercase : str = {v: k for k, v in idalabel.items()}
_lowercase : Optional[Any] = SwinConfig(
embed_dim=_lowercase, depths=_lowercase, num_heads=_lowercase, window_size=_lowercase, out_features=['stage1', 'stage2', 'stage3', 'stage4'], )
_lowercase : Tuple = UperNetConfig(
backbone_config=_lowercase, auxiliary_in_channels=_lowercase, num_labels=_lowercase, idalabel=_lowercase, labelaid=_lowercase, )
return config
def __UpperCamelCase ( _lowercase ) -> int:
_lowercase : Any = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[Any]:
_lowercase : Optional[int] = dct.pop(_lowercase )
_lowercase : List[str] = val
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowercase : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowercase : Optional[int] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
_lowercase : str = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase : Dict = in_proj_weight[:dim, :]
_lowercase : List[str] = in_proj_bias[: dim]
_lowercase : Dict = in_proj_weight[
dim : dim * 2, :
]
_lowercase : List[Any] = in_proj_bias[
dim : dim * 2
]
_lowercase : Optional[int] = in_proj_weight[
-dim :, :
]
_lowercase : int = in_proj_bias[-dim :]
# fmt: on
def __UpperCamelCase ( _lowercase ) -> List[str]:
_lowercase : Dict = x.shape
_lowercase : Dict = x.reshape(_lowercase, 4, in_channel // 4 )
_lowercase : Optional[Any] = x[:, [0, 2, 1, 3], :].transpose(1, 2 ).reshape(_lowercase, _lowercase )
return x
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Optional[int] = x.shape
_lowercase : Optional[Any] = x.reshape(_lowercase, in_channel // 4, 4 )
_lowercase : str = x[:, :, [0, 2, 1, 3]].transpose(1, 2 ).reshape(_lowercase, _lowercase )
return x
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : str = x.shape[0]
_lowercase : List[Any] = x.reshape(4, in_channel // 4 )
_lowercase : Union[str, Any] = x[[0, 2, 1, 3], :].transpose(0, 1 ).reshape(_lowercase )
return x
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Dict = x.shape[0]
_lowercase : Any = x.reshape(in_channel // 4, 4 )
_lowercase : List[str] = x[:, [0, 2, 1, 3]].transpose(0, 1 ).reshape(_lowercase )
return x
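# The four helpers above swap channel blocks 1 and 2 (index order [0, 2, 1, 3]) in the
# patch-merging ("downsample") reduction weights and norm parameters, converting between
# two layouts of the four concatenated neighbouring patches (assumed here: the slicing
# order of the source checkpoints vs. the nn.Unfold order of the HF Swin implementation).
# Only the reverse_* variants are applied below, on the raw state dict.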
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> str:
_lowercase : Dict = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
_lowercase : Dict = model_name_to_url[model_name]
_lowercase : Any = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu', file_name=_lowercase )[
'state_dict'
]
for name, param in state_dict.items():
print(_lowercase, param.shape )
_lowercase : str = get_upernet_config(_lowercase )
_lowercase : List[Any] = UperNetForSemanticSegmentation(_lowercase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowercase : Tuple = state_dict.pop(_lowercase )
if "bn" in key:
_lowercase : str = key.replace('bn', 'batch_norm' )
_lowercase : Union[str, Any] = val
# rename keys
_lowercase : str = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase, _lowercase, _lowercase )
read_in_q_k_v(_lowercase, config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_lowercase : Any = reverse_correct_unfold_reduction_order(_lowercase )
if "norm" in key:
_lowercase : Tuple = reverse_correct_unfold_norm_order(_lowercase )
model.load_state_dict(_lowercase )
# verify on image
_lowercase : List[str] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
_lowercase : List[Any] = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
_lowercase : str = SegformerImageProcessor()
_lowercase : Optional[int] = processor(_lowercase, return_tensors='pt' ).pixel_values
with torch.no_grad():
_lowercase : str = model(_lowercase )
_lowercase : Tuple = outputs.logits
print(logits.shape )
print('First values of logits:', logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_lowercase : Dict = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
_lowercase : Union[str, Any] = torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
_lowercase : int = torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
_lowercase : Any = torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print('Logits:', outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], _lowercase, atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F'''upernet-swin-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A : Optional[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A : Union[str, Any] ={
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =[
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =[
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_lowercase : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
_lowercase : Dict = AutoTokenizer.from_pretrained('google/mt5-small' )
_lowercase : Any = tokenizer('Hello there' , return_tensors='np' ).input_ids
_lowercase : Any = tokenizer('Hi I am' , return_tensors='np' ).input_ids
_lowercase : Union[str, Any] = shift_tokens_right(UpperCamelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
_lowercase : List[Any] = model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ).logits
_lowercase : Dict = optax.softmax_cross_entropy(UpperCamelCase_ , onehot(UpperCamelCase_ , logits.shape[-1] ) ).mean()
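        # loss is the mean per-token cross-entropy; multiplying by the target length and
        # negating recovers the total sequence log-likelihood that EXPECTED_SCORE encodes.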
_lowercase : Tuple = -(labels.shape[-1] * loss.item())
_lowercase : Optional[int] = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
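        # ByT5 ids are raw UTF-8 bytes offset by 3 (ids 0-2 are reserved special tokens),
        # e.g. 'U' = 0x55 = 85 -> 88, ' ' = 32 -> 35, '€' = 0xE2 0x82 0xAC -> 229, 133, 175,
        # and the trailing 1 is the </s> token.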
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
assert isinstance(_lowercase, _lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Tuple:
_lowercase : Optional[Any] = tmp_path / 'cache'
_lowercase : Any = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : List[Any] = TextDatasetReader(_lowercase, cache_dir=_lowercase, keep_in_memory=_lowercase ).read()
_check_text_dataset(_lowercase, _lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Tuple:
_lowercase : List[str] = tmp_path / 'cache'
_lowercase : Optional[int] = {'text': 'string'}
_lowercase : int = features.copy() if features else default_expected_features
_lowercase : List[str] = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Tuple = TextDatasetReader(_lowercase, features=_lowercase, cache_dir=_lowercase ).read()
_check_text_dataset(_lowercase, _lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[Any]:
_lowercase : Optional[Any] = tmp_path / 'cache'
_lowercase : Any = {'text': 'string'}
_lowercase : Tuple = TextDatasetReader(_lowercase, cache_dir=_lowercase, split=_lowercase ).read()
_check_text_dataset(_lowercase, _lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type', [str, list] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> int:
if issubclass(_lowercase, _lowercase ):
_lowercase : Any = text_path
elif issubclass(_lowercase, _lowercase ):
_lowercase : Any = [text_path]
_lowercase : List[Any] = tmp_path / 'cache'
_lowercase : Any = {'text': 'string'}
_lowercase : List[str] = TextDatasetReader(_lowercase, cache_dir=_lowercase ).read()
_check_text_dataset(_lowercase, _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase=("train",) ) -> Optional[Any]:
assert isinstance(_lowercase, _lowercase )
for split in splits:
_lowercase : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory', [False, True] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> str:
_lowercase : Any = tmp_path / 'cache'
_lowercase : Union[str, Any] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : List[Any] = TextDatasetReader({'train': text_path}, cache_dir=_lowercase, keep_in_memory=_lowercase ).read()
_check_text_datasetdict(_lowercase, _lowercase )
@pytest.mark.parametrize(
'features', [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
], )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> List[Any]:
_lowercase : str = tmp_path / 'cache'
    # "features" overrides the declared dtype of the "text" column (default is "string")
_lowercase : Optional[int] = {'text': 'string'}
_lowercase : Union[str, Any] = features.copy() if features else default_expected_features
_lowercase : List[str] = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Any = TextDatasetReader({'train': text_path}, features=_lowercase, cache_dir=_lowercase ).read()
_check_text_datasetdict(_lowercase, _lowercase )
@pytest.mark.parametrize('split', [None, NamedSplit('train' ), 'train', 'test'] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> int:
if split:
_lowercase : Dict = {split: text_path}
else:
_lowercase : Union[str, Any] = 'train'
_lowercase : Union[str, Any] = {'train': text_path, 'test': text_path}
_lowercase : Tuple = tmp_path / 'cache'
_lowercase : Union[str, Any] = {'text': 'string'}
_lowercase : Optional[int] = TextDatasetReader(_lowercase, cache_dir=_lowercase ).read()
_check_text_datasetdict(_lowercase, _lowercase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 714 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 0 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase = 1000 ) -> int:
_lowercase : Dict = -1
_lowercase : Optional[int] = 0
for a in range(1, n // 3 ):
        # solving a**2 + b**2 == c**2 and a + b + c == n for b, eliminating c
_lowercase : List[str] = (n * n - 2 * a * n) // (2 * n - 2 * a)
_lowercase : Tuple = n - a - b
if c * c == (a * a + b * b):
_lowercase : int = a * b * c
if candidate >= product:
_lowercase : Optional[int] = candidate
return product
if __name__ == "__main__":
print(F'''{solution() = }''')
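# Worked example (added for clarity; not part of the original module): for a
# fixed perimeter n, substituting c = n - a - b into a**2 + b**2 == c**2 and
# solving for b gives b = (n**2 - 2*a*n) / (2*n - 2*a), the closed form used
# in the loop above. A quick sanity check for n = 12 (the 3-4-5 triangle):
def _example_triplet_check() -> None:
    n, a = 12, 3
    b = (n * n - 2 * a * n) // (2 * n - 2 * a)  # -> 4
    c = n - a - b  # -> 5
    assert (a, b, c) == (3, 4, 5) and a * a + b * b == c * c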
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
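# Note (added for clarity): the helper above returns the Shannon entropy of
# softmax(x), computed row-wise as
#     H = log(sum_j exp(x_j)) - sum_j x_j * exp(x_j) / sum_j exp(x_j)
# A direct cross-check against the definition H = -sum_j p_j * log(p_j):
def _entropy_reference(x: torch.Tensor) -> torch.Tensor:
    p = torch.softmax(x, dim=1)
    return -(p * torch.log(p)).sum(dim=1)
# For any x = torch.randn(2, 5), entropy(x) and _entropy_reference(x) agree
# up to floating-point tolerance.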
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowercase : Optional[Any] = x
else:
_lowercase : Optional[int] = x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
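# Note (added for clarity): at inference time the encoder above exits early by
# raising HighwayException as soon as the entropy of a highway classifier's
# logits falls below that layer's threshold; the sequence-classification model
# further below catches the exception and uses the highway logits instead of
# running the remaining layers.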
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = message
_lowercase : Dict = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : Dict = config.num_labels
_lowercase : Any = config.num_hidden_layers
_lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
_lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
_lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 0 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_A : int =(
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
_A : Union[str, Any] =(
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
_A : Union[str, Any] =(
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
_A : Union[str, Any] =(
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
_A : Dict =(
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
_A : int =(
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
_A : Optional[int] =(
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def __UpperCamelCase ( ) -> Dict:
    _lowercase , _lowercase : Optional[Any] = randrange(len(_lowercase ) ), randrange(len(_lowercase ) )
    _lowercase : List[Any] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    _lowercase , _lowercase : str = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __UpperCamelCase ( _lowercase = 100 ) -> Any:
return (generate_random_hand() for _ in range(_lowercase ))
@pytest.mark.parametrize('hand, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
assert PokerHand(_lowercase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
assert PokerHand(_lowercase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[Any]:
_lowercase : List[Any] = PokerHand(_lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
assert PokerHand(_lowercase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
assert PokerHand(_lowercase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Dict:
assert PokerHand(_lowercase ).compare_with(PokerHand(_lowercase ) ) == expected
@pytest.mark.parametrize('hand, other, expected', generate_random_hands() )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> str:
assert PokerHand(_lowercase ).compare_with(PokerHand(_lowercase ) ) == expected
def __UpperCamelCase ( ) -> Dict:
_lowercase : Optional[int] = [PokerHand(_lowercase ) for hand in SORTED_HANDS]
_lowercase : Tuple = poker_hands.copy()
shuffle(_lowercase )
_lowercase : Optional[int] = chain(sorted(_lowercase ) )
for index, hand in enumerate(_lowercase ):
assert hand == poker_hands[index]
def __UpperCamelCase ( ) -> Any:
# Test that five high straights are compared correctly.
_lowercase : List[Any] = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=_lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __UpperCamelCase ( ) -> List[str]:
    # Multiple calls to _is_five_high_straight should keep returning True
    # and should not mutate the hand's card values after the first call.
_lowercase : Tuple = PokerHand('2C 4S AS 3D 5C' )
_lowercase : Any = True
_lowercase : Optional[Any] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __UpperCamelCase ( ) -> Any:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
_lowercase : Optional[int] = 0
_lowercase : int = os.path.abspath(os.path.dirname(_lowercase ) )
_lowercase : Optional[int] = os.path.join(_lowercase, 'poker_hands.txt' )
with open(_lowercase ) as file_hand:
for line in file_hand:
_lowercase : Optional[int] = line[:14].strip()
_lowercase : str = line[15:].strip()
            _lowercase , _lowercase : str = PokerHand(_lowercase ), PokerHand(_lowercase )
_lowercase : Optional[int] = player.compare_with(_lowercase )
if output == "Win":
answer += 1
assert answer == 376
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = [10, 20, 30, 40, 50, 60]
_lowercase : Tuple = [2, 4, 6, 8, 10, 12]
_lowercase : Optional[Any] = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
_A : Optional[Any] =TypeVar('''T''')
def __UpperCamelCase ( _lowercase ) -> int:
return (position - 1) // 2
def __UpperCamelCase ( _lowercase ) -> int:
return (2 * position) + 1
def __UpperCamelCase ( _lowercase ) -> int:
return (2 * position) + 2
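# Illustration (added for clarity): with 0-based array indexing, a node stored
# at position p has its parent at (p - 1) // 2 and its children at 2*p + 1 and
# 2*p + 2. For example, for p = 4:
#     get_parent_position(4)       # -> 1
#     get_child_left_position(4)   # -> 9
#     get_child_right_position(4)  # -> 10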
class lowerCamelCase__ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Any ) -> None:
'''simple docstring'''
_lowercase : list[tuple[T, int]] = []
_lowercase : dict[T, int] = {}
_lowercase : int = 0
def __len__( self : List[str] ) -> int:
'''simple docstring'''
return self.elements
def __repr__( self : List[str] ) -> str:
'''simple docstring'''
return str(self.heap )
def __UpperCAmelCase ( self : List[str] ) -> bool:
'''simple docstring'''
return self.elements == 0
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : T , UpperCamelCase_ : int ) -> None:
'''simple docstring'''
self.heap.append((elem, weight) )
_lowercase : Tuple = self.elements
self.elements += 1
self._bubble_up(UpperCamelCase_ )
def __UpperCAmelCase ( self : int ) -> T:
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_lowercase : Union[str, Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_lowercase : List[str] = self.heap[0]
self._bubble_down(UpperCamelCase_ )
return elem
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : T , UpperCamelCase_ : int ) -> None:
'''simple docstring'''
_lowercase : List[Any] = self.position_map[elem]
_lowercase : Optional[Any] = (elem, weight)
if position > 0:
_lowercase : Optional[Any] = get_parent_position(UpperCamelCase_ )
_lowercase : str = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(UpperCamelCase_ )
else:
self._bubble_down(UpperCamelCase_ )
else:
self._bubble_down(UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : T ) -> None:
'''simple docstring'''
_lowercase : Optional[int] = self.position_map[elem]
if curr_pos == 0:
return None
_lowercase : Optional[Any] = get_parent_position(UpperCamelCase_ )
_lowercase : List[str] = self.heap[curr_pos]
_lowercase : List[Any] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(UpperCamelCase_ , UpperCamelCase_ )
return self._bubble_up(UpperCamelCase_ )
return None
def __UpperCAmelCase ( self : str , UpperCamelCase_ : T ) -> None:
'''simple docstring'''
_lowercase : Optional[Any] = self.position_map[elem]
_lowercase : Optional[Any] = self.heap[curr_pos]
_lowercase : Dict = get_child_left_position(UpperCamelCase_ )
_lowercase : Union[str, Any] = get_child_right_position(UpperCamelCase_ )
if child_left_position < self.elements and child_right_position < self.elements:
_lowercase : List[Any] = self.heap[child_left_position]
_lowercase : int = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(UpperCamelCase_ , UpperCamelCase_ )
return self._bubble_down(UpperCamelCase_ )
if child_left_position < self.elements:
_lowercase : Dict = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(UpperCamelCase_ , UpperCamelCase_ )
return self._bubble_down(UpperCamelCase_ )
else:
return None
if child_right_position < self.elements:
_lowercase : Optional[Any] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(UpperCamelCase_ , UpperCamelCase_ )
return self._bubble_down(UpperCamelCase_ )
return None
def __UpperCAmelCase ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> None:
'''simple docstring'''
_lowercase : Dict = self.heap[nodea_pos][0]
_lowercase : List[Any] = self.heap[nodea_pos][0]
_lowercase : Any = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_lowercase : str = nodea_pos
_lowercase : Optional[int] = nodea_pos
class lowerCamelCase__ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Any ) -> None:
'''simple docstring'''
_lowercase : dict[T, dict[T, int]] = {}
_lowercase : int = 0
def __repr__( self : Tuple ) -> str:
'''simple docstring'''
return str(self.connections )
def __len__( self : int ) -> int:
'''simple docstring'''
return self.nodes
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : T ) -> None:
'''simple docstring'''
if node not in self.connections:
_lowercase : Union[str, Any] = {}
self.nodes += 1
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : T , UpperCamelCase_ : T , UpperCamelCase_ : int ) -> None:
'''simple docstring'''
self.add_node(UpperCamelCase_ )
self.add_node(UpperCamelCase_ )
_lowercase : int = weight
_lowercase : Tuple = weight
def __UpperCamelCase ( _lowercase, ) -> tuple[dict[T, int], dict[T, T | None]]:
_lowercase : dict[T, int] = {node: maxsize for node in graph.connections}
_lowercase : dict[T, T | None] = {node: None for node in graph.connections}
_lowercase : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_lowercase, _lowercase )
if priority_queue.is_empty():
return dist, parent
# initialization
_lowercase : Dict = priority_queue.extract_min()
_lowercase : Any = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowercase : Any = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_lowercase, dist[neighbour] )
_lowercase : str = node
# running prim's algorithm
while not priority_queue.is_empty():
_lowercase : Dict = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowercase : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_lowercase, dist[neighbour] )
_lowercase : Dict = node
return dist, parent
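# Usage sketch (added; the names below are assumptions, since the originals
# are obfuscated above -- call the graph class GraphUndirectedWeighted, its
# edge method add_edge, and the final function prims_algorithm):
#
#     graph = GraphUndirectedWeighted()
#     graph.add_edge("a", "b", 3)
#     graph.add_edge("b", "c", 10)
#     graph.add_edge("c", "a", 5)
#     dist, parent = prims_algorithm(graph)
#     # parent -> {"a": None, "b": "a", "c": "a"}: the MST keeps edges a-b, a-c.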
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import numpy as np
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase = 1E-1_2, _lowercase = 100, ) -> tuple[float, np.ndarray]:
assert np.shape(_lowercase )[0] == np.shape(_lowercase )[1]
# Ensure proper dimensionality.
assert np.shape(_lowercase )[0] == np.shape(_lowercase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_lowercase ) == np.iscomplexobj(_lowercase )
_lowercase : Optional[int] = np.iscomplexobj(_lowercase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_lowercase, input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_lowercase : Optional[Any] = False
_lowercase : Any = 0
_lowercase : List[Any] = 0
_lowercase : List[Any] = 1E1_2
while not convergence:
        # Multiply the matrix by the vector.
_lowercase : Optional[Any] = np.dot(_lowercase, _lowercase )
# Normalize the resulting output vector.
_lowercase : List[Any] = w / np.linalg.norm(_lowercase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_lowercase : Optional[int] = vector.conj().T if is_complex else vector.T
_lowercase : Optional[Any] = np.dot(_lowercase, np.dot(_lowercase, _lowercase ) )
# Check convergence.
_lowercase : Tuple = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_lowercase : Optional[int] = True
_lowercase : str = lambda_
if is_complex:
_lowercase : Any = np.real(lambda_ )
return lambda_, vector
def __UpperCamelCase ( ) -> None:
_lowercase : Optional[int] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_lowercase : Optional[int] = np.array([41, 4, 20] )
_lowercase : Any = real_input_matrix.astype(np.complexaaa )
_lowercase : str = np.triu(1j * complex_input_matrix, 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_lowercase : List[Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_lowercase : Any = real_input_matrix
_lowercase : List[Any] = real_vector
elif problem_type == "complex":
_lowercase : Union[str, Any] = complex_input_matrix
_lowercase : Optional[Any] = complex_vector
# Our implementation.
_lowercase : Union[str, Any] = power_iteration(_lowercase, _lowercase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_lowercase : List[Any] = np.linalg.eigh(_lowercase )
# Last eigenvalue is the maximum one.
_lowercase : Any = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_lowercase : str = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(_lowercase ) - np.abs(_lowercase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
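# Usage sketch (added): power_iteration returns the dominant eigenpair of a
# symmetric (or Hermitian) matrix. For a small diagonal example the answer is
# known in closed form -- [[2, 0], [0, 1]] has dominant eigenvalue 2 with
# eigenvector (1, 0):
#
#     value, vector = power_iteration(
#         np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0])
#     )
#     # value -> ~2.0; np.abs(vector) -> ~[1.0, 0.0]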
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """markuplm"""
def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
| 4 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
A_ = Features({"""text""": Value("""string""" )} )
A_ = Features({"""summary""": Value("""string""" )} )
A_ = """text"""
A_ = """summary"""
@property
def __UpperCAmelCase ( self : List[str] ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
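# Usage sketch (added; the exported names are assumptions, since the class and
# property are obfuscated above -- in the datasets library this template is
# typically exposed as Summarization with a column_mapping property):
#
#     task = Summarization(text_column="article", summary_column="highlights")
#     task.column_mapping  # -> {"article": "text", "highlights": "summary"}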
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : Tuple = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowercase : Tuple = 4
_lowercase : Union[str, Any] = 48
_lowercase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : Dict = [6, 6, 6, 6]
_lowercase : Optional[int] = 60
_lowercase : List[str] = [6, 6, 6, 6]
_lowercase : Dict = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : str = 4
_lowercase : str = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowercase : str = 1
_lowercase : Tuple = 1
_lowercase : Dict = 126
_lowercase : Optional[int] = 7
_lowercase : List[Any] = 2_5_5.0
_lowercase : Tuple = ''
return config
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
if "patch_embed.proj" in name and "layers" not in name:
_lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
_lowercase : Tuple = name.replace('layers', 'encoder.stages' )
if "residual_group.blocks" in name:
_lowercase : str = name.replace('residual_group.blocks', 'layers' )
if "attn.proj" in name:
_lowercase : str = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
_lowercase : List[Any] = name.replace('attn', 'attention.self' )
if "norm1" in name:
_lowercase : List[str] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
_lowercase : Tuple = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' )
if "q_bias" in name:
_lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' )
if "k_bias" in name:
_lowercase : str = name.replace('k_bias', 'key.bias' )
if "v_bias" in name:
_lowercase : int = name.replace('v_bias', 'value.bias' )
if "cpb_mlp" in name:
_lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' )
if name == "norm.weight":
_lowercase : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
_lowercase : List[Any] = 'layernorm.bias'
if "conv_first" in name:
_lowercase : Tuple = name.replace('conv_first', 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowercase : List[str] = name.replace('conv_last', 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
if "upsample.0" in name:
_lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' )
if "upsample.2" in name:
_lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' )
_lowercase : Optional[int] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' )
_lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' )
else:
pass
else:
_lowercase : Tuple = 'swin2sr.' + name
return name
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
for key in orig_state_dict.copy().keys():
_lowercase : int = orig_state_dict.pop(_lowercase )
if "qkv" in key:
_lowercase : Tuple = key.split('.' )
_lowercase : Optional[Any] = int(key_split[1] )
_lowercase : Any = int(key_split[4] )
_lowercase : Optional[Any] = config.embed_dim
if "weight" in key:
_lowercase : Optional[int] = val[:dim, :]
_lowercase : int = val[dim : dim * 2, :]
_lowercase : int = val[-dim:, :]
else:
_lowercase : Optional[Any] = val[:dim]
_lowercase : Tuple = val[dim : dim * 2]
_lowercase : List[str] = val[-dim:]
pass
else:
_lowercase : List[Any] = val
return orig_state_dict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Optional[Any] = get_config(_lowercase )
_lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
_lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
_lowercase : Any = convert_state_dict(_lowercase, _lowercase )
_lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase )
if len(_lowercase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
_lowercase : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
_lowercase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
_lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowercase : Optional[int] = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 512, 512] )
_lowercase : Tuple = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
_lowercase : int = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
_lowercase : Dict = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : List[str] = torch.Size([1, 3, 512, 512] )
_lowercase : int = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 1024, 1024] )
_lowercase : Union[str, Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
print('Looks ok!' )
_lowercase : List[str] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_lowercase : int = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
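# Example invocation (added; the script filename is an assumption, the flags
# are the ones defined above):
#
#     python convert_swin2sr_original_to_pytorch.py \
#         --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#         --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#         --push_to_hub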
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A : Any ={
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> list:
_lowercase : List[str] = word.split()
def justify(_lowercase, _lowercase, _lowercase ) -> str:
_lowercase : Dict = max_width - width
_lowercase : Tuple = len(_lowercase )
if len(_lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_lowercase : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_lowercase : str = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_lowercase : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_lowercase ):
num_spaces_between_words_list[i] += 1
_lowercase : Union[str, Any] = []
for i in range(_lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowercase )
_lowercase : str = []
_lowercase : list[str] = []
_lowercase : Union[str, Any] = 0
for word in words:
if width + len(_lowercase ) + len(_lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowercase )
width += len(_lowercase )
else:
# justify the line and add it to result
answer.append(justify(_lowercase, _lowercase, _lowercase ) )
# reset new line and new width
_lowercase , _lowercase : Optional[Any] = [word], len(_lowercase )
_lowercase : Optional[int] = max_width - width - len(_lowercase )
answer.append(' '.join(_lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
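# Worked example (added; the obfuscated function above corresponds to a
# text_justification(sentence, max_width) helper -- the name is an assumption):
#
#     text_justification("This is an example of text justification.", 16)
#     # -> ['This    is    an', 'example  of text', 'justification.  ']
#
# Words are packed greedily per line, surplus spaces are distributed
# round-robin starting from the left gap, and the final line is
# left-justified and padded with trailing spaces.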
| 4 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_A : List[str] =logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""pixel_values"""]
def __init__( self : List[str] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 255 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Dict , ) -> None:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Any = size if size is not None else {'shortest_edge': 224}
_lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
_lowercase : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name='crop_size' )
_lowercase : Optional[Any] = do_resize
_lowercase : Dict = size
_lowercase : Any = resample
_lowercase : Tuple = do_center_crop
_lowercase : Dict = crop_size
_lowercase : Tuple = do_rescale
_lowercase : Tuple = rescale_factor
_lowercase : Tuple = do_normalize
_lowercase : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowercase : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
_lowercase : Union[str, Any] = do_convert_rgb
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Dict , ) -> np.ndarray:
'''simple docstring'''
_lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowercase : Any = get_resize_output_image_size(UpperCamelCase_ , size=size['shortest_edge'] , default_to_square=UpperCamelCase_ )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Tuple , ) -> np.ndarray:
'''simple docstring'''
_lowercase : Any = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCamelCase_ , size=(size['height'], size['width']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : List[str] , ) -> int:
'''simple docstring'''
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Tuple , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : int = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_ : Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
_lowercase : Dict = do_resize if do_resize is not None else self.do_resize
_lowercase : Tuple = size if size is not None else self.size
_lowercase : List[str] = get_size_dict(UpperCamelCase_ , param_name='size' , default_to_square=UpperCamelCase_ )
_lowercase : Any = resample if resample is not None else self.resample
_lowercase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : Tuple = crop_size if crop_size is not None else self.crop_size
_lowercase : Any = get_size_dict(UpperCamelCase_ , param_name='crop_size' , default_to_square=UpperCamelCase_ )
_lowercase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : List[str] = image_mean if image_mean is not None else self.image_mean
_lowercase : Tuple = image_std if image_std is not None else self.image_std
_lowercase : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowercase : int = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowercase : int = [convert_to_rgb(UpperCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
_lowercase : Any = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
_lowercase : Optional[Any] = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
_lowercase : List[Any] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
_lowercase : Optional[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
_lowercase : List[str] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
_lowercase : int = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
_lowercase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
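# walk the tree from the given root, pruning the scripts dir and hidden/private dirs, and yield every .py/.ipynb file except __init__.py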
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
def __UpperCamelCase ( _lowercase ) -> str:
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
| 4 | 0 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> List[str]:
_lowercase : str = [False] * len(_lowercase )
_lowercase : Dict = [-1] * len(_lowercase )
def dfs(_lowercase, _lowercase ):
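# 2-coloring: mark vertex v with color c, then color every unvisited neighbour with the opposite color (1 - c)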
_lowercase : Dict = True
_lowercase : Optional[int] = c
for u in graph[v]:
if not visited[u]:
dfs(_lowercase, 1 - c )
for i in range(len(_lowercase ) ):
if not visited[i]:
dfs(_lowercase, 0 )
for i in range(len(_lowercase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_A : Union[str, Any] ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Optional[Any]=36 , UpperCamelCase_ : str=2 , UpperCamelCase_ : int=4 , UpperCamelCase_ : int=37 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Optional[Any]=512 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : Any=6 , UpperCamelCase_ : str=6 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Dict=1000 , ) -> List[Any]:
'''simple docstring'''
_lowercase : int = parent
_lowercase : Dict = batch_size
_lowercase : str = num_channels
_lowercase : Optional[Any] = image_size
_lowercase : Tuple = patch_size
_lowercase : Optional[int] = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Optional[int] = use_labels
_lowercase : List[str] = vocab_size
_lowercase : int = hidden_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : str = hidden_act
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : str = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : Optional[int] = coordinate_size
_lowercase : List[str] = shape_size
_lowercase : Tuple = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : Tuple = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowercase : List[str] = text_seq_length
_lowercase : str = (image_size // patch_size) ** 2 + 1
_lowercase : Optional[Any] = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowercase : Dict = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_lowercase : Dict = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : Dict = bbox[i, j, 3]
_lowercase : Any = bbox[i, j, 1]
_lowercase : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Union[str, Any] = bbox[i, j, 0]
_lowercase : str = tmp_coordinate
_lowercase : str = tf.constant(UpperCamelCase_ )
_lowercase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Dict = None
if self.use_input_mask:
_lowercase : str = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowercase : Optional[Any] = None
_lowercase : int = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowercase : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = TFLayoutLMvaModel(config=UpperCamelCase_ )
# text + image
_lowercase : Optional[Any] = model(UpperCamelCase_ , pixel_values=UpperCamelCase_ , training=UpperCamelCase_ )
_lowercase : str = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , training=UpperCamelCase_ , )
_lowercase : List[str] = model(UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowercase : Any = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowercase : int = model({'pixel_values': pixel_values} , training=UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
_lowercase : List[str] = self.num_labels
_lowercase : List[Any] = TFLayoutLMvaForSequenceClassification(config=UpperCamelCase_ )
_lowercase : Any = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Any ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.num_labels
_lowercase : List[str] = TFLayoutLMvaForTokenClassification(config=UpperCamelCase_ )
_lowercase : List[Any] = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> Any:
'''simple docstring'''
_lowercase : int = 2
_lowercase : Tuple = TFLayoutLMvaForQuestionAnswering(config=UpperCamelCase_ )
_lowercase : Union[str, Any] = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , training=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.prepare_config_and_inputs()
_lowercase : Tuple = config_and_inputs
_lowercase : int = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( A , A , unittest.TestCase ):
'''simple docstring'''
A_ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
A_ = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict ) -> Dict:
'''simple docstring'''
return True
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any]=False ) -> dict:
'''simple docstring'''
_lowercase : Dict = copy.deepcopy(UpperCamelCase_ )
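# multiple-choice models expect every tensor input tiled across a new num_choices dimension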
if model_class in get_values(UpperCamelCase_ ):
_lowercase : Optional[int] = {
k: tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCamelCase_ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase_ ):
_lowercase : Optional[Any] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase_ ):
_lowercase : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
_lowercase : Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase_ ):
_lowercase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase_ ):
_lowercase : List[str] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : str = TFLayoutLMvaModelTester(self )
_lowercase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Any = model_class(UpperCamelCase_ )
if getattr(UpperCamelCase_ , 'hf_compute_loss' , UpperCamelCase_ ):
# The number of elements in the loss should be the same as the number of elements in the label
_lowercase : str = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
_lowercase : Optional[int] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCamelCase_ )[0]
]
_lowercase : Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_lowercase : List[str] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
_lowercase : Optional[int] = prepared_for_class.pop('input_ids' )
_lowercase : int = model(UpperCamelCase_ , **UpperCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_lowercase : int = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
_lowercase : str = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
_lowercase : Dict = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_lowercase : int = -100
_lowercase : List[str] = tf.convert_to_tensor(UpperCamelCase_ )
_lowercase : List[str] = model(UpperCamelCase_ , **UpperCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_lowercase : Dict = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
_lowercase : List[str] = model(UpperCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_lowercase : str = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
# Get keys that were added with the _prepare_for_class function
_lowercase : str = prepared_for_class.keys() - inputs_dict.keys()
_lowercase : List[str] = inspect.signature(model.call ).parameters
_lowercase : Optional[int] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_lowercase : Optional[int] = {0: 'input_ids'}
for label_key in label_keys:
_lowercase : Union[str, Any] = signature_names.index(UpperCamelCase_ )
_lowercase : List[str] = label_key
_lowercase : Tuple = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_lowercase : Optional[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_lowercase : int = prepared_for_class[value]
_lowercase : Any = tuple(UpperCamelCase_ )
# Send to model
_lowercase : List[str] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase : Dict = type
self.model_tester.create_and_check_model(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[int] = TFLayoutLMvaModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def __UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
_lowercase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase_ ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
_lowercase : Dict = self.default_image_processor
_lowercase : List[str] = prepare_img()
_lowercase : List[str] = image_processor(images=UpperCamelCase_ , return_tensors='tf' ).pixel_values
_lowercase : Dict = tf.constant([[1, 2]] )
_lowercase : Union[str, Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
_lowercase : Optional[int] = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , training=UpperCamelCase_ )
# verify the logits
_lowercase : Any = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase_ )
_lowercase : Union[str, Any] = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
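# run a toy 6-token input through the pretrained checkpoint and spot-check the output shape and logits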
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : List[str] =logging.get_logger(__name__)
_A : int ={
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """luke"""
def __init__( self : List[Any] , UpperCamelCase_ : int=5_0267 , UpperCamelCase_ : List[Any]=50_0000 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Any=256 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : Optional[int]=12 , UpperCamelCase_ : Any=3072 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Tuple=1E-12 , UpperCamelCase_ : Any=True , UpperCamelCase_ : Any=None , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Union[str, Any]=0 , UpperCamelCase_ : Optional[Any]=2 , **UpperCamelCase_ : Tuple , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = entity_vocab_size
_lowercase : List[str] = hidden_size
_lowercase : Dict = entity_emb_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[str] = max_position_embeddings
_lowercase : Optional[Any] = type_vocab_size
_lowercase : Dict = initializer_range
_lowercase : Union[str, Any] = layer_norm_eps
_lowercase : Optional[int] = use_entity_aware_attention
_lowercase : Optional[int] = classifier_dropout
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
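# shrink the full-length mel to chunk_frames via bilinear interpolation; it becomes the global channel of the 4-way fusion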
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
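# normalize raw_speech into a batch (list) of mono float32 numpy arrays before feature extraction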
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def __UpperCamelCase ( _lowercase ) -> dict:
_lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_lowercase ).json()
def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]:
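# fetch the IDs of the current top stories, then resolve each ID to its full JSON record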
_lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
_lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories]
return [get_hackernews_story(_lowercase ) for story_id in story_ids]
def __UpperCamelCase ( _lowercase = 10 ) -> str:
_lowercase : Tuple = hackernews_top_stories(_lowercase )
return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 4 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : int ={
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
_A : Any ={
'''distilbert-base-uncased''': 5_1_2,
'''distilbert-base-uncased-distilled-squad''': 5_1_2,
'''distilbert-base-cased''': 5_1_2,
'''distilbert-base-cased-distilled-squad''': 5_1_2,
'''distilbert-base-german-cased''': 5_1_2,
'''distilbert-base-multilingual-cased''': 5_1_2,
}
_A : List[str] ={
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PRETRAINED_INIT_CONFIGURATION
A_ = ["""input_ids""", """attention_mask"""]
A_ = DistilBertTokenizer
def __init__( self : int , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : int=True , UpperCamelCase_ : Any="[UNK]" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Optional[Any]="[PAD]" , UpperCamelCase_ : List[Any]="[CLS]" , UpperCamelCase_ : List[Any]="[MASK]" , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Any=None , **UpperCamelCase_ : Optional[int] , ) -> Dict:
'''simple docstring'''
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase_ ) != tokenize_chinese_chars
):
_lowercase : int = getattr(UpperCamelCase_ , normalizer_state.pop('type' ) )
_lowercase : str = do_lower_case
_lowercase : Dict = strip_accents
_lowercase : Optional[Any] = tokenize_chinese_chars
_lowercase : Any = normalizer_class(**UpperCamelCase_ )
_lowercase : Optional[Any] = do_lower_case
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[str]=None ) -> Optional[int]:
'''simple docstring'''
_lowercase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_lowercase : List[str] = [self.sep_token_id]
_lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_lowercase : Optional[Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
| 4 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCamelCase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int = None , UpperCamelCase_ : int = None ) -> Any:
'''simple docstring'''
super().__init__()
_lowercase : Tuple = pad_token_id
_lowercase : int = max_length
_lowercase : List[Any] = vocab
_lowercase : Tuple = merges
_lowercase : int = BytePairTokenizer(UpperCamelCase_ , UpperCamelCase_ , sequence_length=UpperCamelCase_ )
@classmethod
def __UpperCAmelCase ( cls : Any , UpperCamelCase_ : GPTaTokenizer , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
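# keras-nlp's BytePairTokenizer expects merges as "token_a token_b" strings, so rebuild them from the slow tokenizer's bpe_ranks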
_lowercase : Union[str, Any] = [' '.join(UpperCamelCase_ ) for m in tokenizer.bpe_ranks.keys()]
_lowercase : int = tokenizer.get_vocab()
return cls(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def __UpperCAmelCase ( cls : int , UpperCamelCase_ : Union[str, os.PathLike] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Dict ) -> Dict:
'''simple docstring'''
_lowercase : int = GPTaTokenizer.from_pretrained(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
return cls.from_tokenizer(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Any ) -> List[str]:
'''simple docstring'''
return cls(**UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int = None ) -> List[Any]:
'''simple docstring'''
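# tokenize the raw strings, then build an all-ones attention mask (padding positions are zeroed out below)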
_lowercase : Any = self.tf_tokenizer(UpperCamelCase_ )
_lowercase : Optional[Any] = tf.ones_like(UpperCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
_lowercase : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
_lowercase : List[Any] = pad_model_inputs(
UpperCamelCase_ , max_seq_length=UpperCamelCase_ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
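# embeddings, LayerNorm, pooler, classifier heads and biases are never pruned; copy them verbatim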
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
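# hard-concrete (L0) binarization: stretch the sigmoid of the scores from (0, 1) to (l, r) = (-0.1, 1.1), then clamp back to [0, 1]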
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
    help='''Folder where the pruned ("bertarized") model will be saved; defaults to `bertarized_<model_name_or_path>`''',
)
_A : List[Any] =parser.parse_args()
main(args)
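# --- illustrative sketch (not part of the script above) ---
# A stand-alone version of the top-K masking idea applied above, assuming
# `threshold` is the fraction of weights to keep, as in movement pruning.
# This sketches the concept only; it is not the emmental TopKBinarizer.
import torch

def topk_mask(mask_scores: torch.Tensor, threshold: float) -> torch.Tensor:
    # Keep the `threshold` fraction of entries with the highest scores.
    k = max(1, int(threshold * mask_scores.numel()))
    kth_value = torch.topk(mask_scores.flatten(), k).values.min()
    return (mask_scores >= kth_value).to(mask_scores.dtype)

# weight = torch.randn(4, 4); scores = torch.randn(4, 4)
# pruned = weight * topk_mask(scores, threshold=0.25)  # ~25% of weights kept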
| 4 | 0 |
'''simple docstring'''
import os
import sys
import unittest
_A : Tuple =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_A : Union[str, Any] =os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
_A : Dict =os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = get_test_to_tester_mapping(UpperCamelCase_ )
_lowercase : Optional[int] = get_test_to_tester_mapping(UpperCamelCase_ )
_lowercase : str = {'BertModelTest': 'BertModelTester'}
_lowercase : List[Any] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = get_model_to_test_mapping(UpperCamelCase_ )
_lowercase : int = get_model_to_test_mapping(UpperCamelCase_ )
_lowercase : Union[str, Any] = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
_lowercase : Optional[int] = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ )
def __UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
_lowercase : Dict = get_model_to_tester_mapping(UpperCamelCase_ )
_lowercase : Union[str, Any] = get_model_to_tester_mapping(UpperCamelCase_ )
_lowercase : int = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
_lowercase : Union[str, Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(get_test_info.to_json(UpperCamelCase_ ) , UpperCamelCase_ )
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
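# --- illustrative check (not part of the module above) ---
# The encoder/decoder above mirror the standard algorithm, so their output
# should agree with the stdlib on any input; a quick sanity check against a
# known value (base64 of b"Hello" is b"SGVsbG8=", one '=' for the two
# zero-padded bits):
import base64 as stdlib_base64

assert stdlib_base64.b64encode(b"Hello") == b"SGVsbG8="
assert stdlib_base64.b64decode(b"SGVsbG8=") == b"Hello"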
| 4 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : int =logging.get_logger(__name__)
_A : Optional[Any] ={
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_A : Any ={
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_A : Union[str, Any] ={'''facebook/blenderbot-3B''': 1_2_8}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ["""input_ids""", """attention_mask"""]
A_ = BlenderbotTokenizer
def __init__( self : str , UpperCamelCase_ : Dict=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any="replace" , UpperCamelCase_ : Optional[int]="<s>" , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Any="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : Optional[Any]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : str="<mask>" , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : str=True , **UpperCamelCase_ : str , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCamelCase_ ) != add_prefix_space:
_lowercase : List[Any] = getattr(UpperCamelCase_ , pre_tok_state.pop('type' ) )
_lowercase : List[str] = add_prefix_space
_lowercase : Any = pre_tok_class(**UpperCamelCase_ )
_lowercase : Tuple = add_prefix_space
_lowercase : Optional[Any] = 'post_processor'
_lowercase : Tuple = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
if tokenizer_component_instance:
_lowercase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowercase : Any = tuple(state['sep'] )
if "cls" in state:
_lowercase : int = tuple(state['cls'] )
_lowercase : Optional[int] = False
if state.get('add_prefix_space' , UpperCamelCase_ ) != add_prefix_space:
_lowercase : List[Any] = add_prefix_space
_lowercase : str = True
if state.get('trim_offsets' , UpperCamelCase_ ) != trim_offsets:
_lowercase : Optional[int] = trim_offsets
_lowercase : Optional[int] = True
if changes_to_apply:
_lowercase : str = getattr(UpperCamelCase_ , state.pop('type' ) )
_lowercase : List[str] = component_class(**UpperCamelCase_ )
setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> Dict:
'''simple docstring'''
_lowercase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value
_lowercase : List[Any] = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Any ) -> BatchEncoding:
'''simple docstring'''
_lowercase : Union[str, Any] = kwargs.get('is_split_into_words' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[Any] ) -> BatchEncoding:
'''simple docstring'''
_lowercase : str = kwargs.get('is_split_into_words' , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_lowercase : List[str] = [self.sep_token_id]
_lowercase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> Any:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : "Conversation" ) -> List[int]:
'''simple docstring'''
_lowercase : Optional[int] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase_ )
_lowercase : Union[str, Any] = ' '.join(UpperCamelCase_ )
_lowercase : Dict = self.encode(UpperCamelCase_ )
if len(UpperCamelCase_ ) > self.model_max_length:
_lowercase : int = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
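# --- illustrative sketch (not part of the class above) ---
# The special-token logic above reduces to a single rule: Blenderbot only
# appends EOS and adds no BOS/CLS prefix. A stand-alone mimic with a
# hypothetical eos id of 2:
def blenderbot_style_inputs(token_ids, eos_token_id=2):
    return token_ids + [eos_token_id]

assert blenderbot_style_inputs([10, 11, 12]) == [10, 11, 12, 2]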
| 707 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
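# --- worked example (not part of the solution above) ---
# 47 resolves immediately: 47 + 74 = 121, a palindrome after one iteration,
# while 196 is the classic candidate that never reaches a palindrome within
# the 50-iteration budget and is therefore counted as Lychrel.
n = 47
n = n + int(str(n)[::-1])
assert n == 121 and str(n) == str(n)[::-1]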
| 4 | 0 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_A : List[str] ='''\
Text data.
Second line of data.'''
_A : Optional[int] ='''file'''
@pytest.fixture(scope='session' )
def __UpperCamelCase ( _lowercase ) -> Dict:
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
_lowercase : Union[str, Any] = bytes(_lowercase, 'utf-8' )
with zstd.open(_lowercase, 'wb' ) as f:
f.write(_lowercase )
return path
@pytest.fixture
def __UpperCamelCase ( _lowercase ) -> Any:
with open(os.path.join(tmpfs.local_root_dir, _lowercase ), 'w' ) as f:
f.write(_lowercase )
return FILE_PATH
@pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> Any:
_lowercase : Any = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
_lowercase : List[Any] = input_paths[compression_format]
_lowercase : Union[str, Any] = tmp_path / 'cache'
_lowercase : Union[str, Any] = DownloadConfig(cache_dir=_lowercase, extract_compressed_file=_lowercase )
_lowercase : Optional[int] = cached_path(_lowercase, download_config=_lowercase )
with open(_lowercase ) as f:
_lowercase : List[str] = f.read()
with open(_lowercase ) as f:
_lowercase : Optional[int] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted', [True, False] )
@pytest.mark.parametrize('default_cache_dir', [True, False] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> Tuple:
_lowercase : Optional[Any] = 'custom_cache'
_lowercase : Tuple = 'custom_extracted_dir'
_lowercase : List[str] = tmp_path / 'custom_extracted_path'
if default_extracted:
_lowercase : Union[str, Any] = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', _lowercase )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(_lowercase ) )
_lowercase : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowercase : List[Any] = xz_file
_lowercase : Any = (
DownloadConfig(extract_compressed_file=_lowercase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=_lowercase )
)
_lowercase : Tuple = cached_path(_lowercase, download_config=_lowercase )
assert Path(_lowercase ).parent.parts[-2:] == expected
def __UpperCamelCase ( _lowercase ) -> Union[str, Any]:
# absolute path
_lowercase : Tuple = str(Path(_lowercase ).resolve() )
assert cached_path(_lowercase ) == text_file
# relative path
_lowercase : Dict = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowercase ) == text_file
def __UpperCamelCase ( _lowercase ) -> Union[str, Any]:
# absolute path
_lowercase : List[Any] = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(_lowercase ):
cached_path(_lowercase )
# relative path
_lowercase : Tuple = './__missing_file__.txt'
with pytest.raises(_lowercase ):
cached_path(_lowercase )
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : List[Any] = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(_lowercase ) as f:
_lowercase : Optional[Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE', _lowercase )
def __UpperCamelCase ( ) -> Dict:
with pytest.raises(_lowercase ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', _lowercase )
def __UpperCamelCase ( _lowercase ) -> List[str]:
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_lowercase ):
http_get('https://huggingface.co', temp_file=_lowercase )
with pytest.raises(_lowercase ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', _lowercase )
def __UpperCamelCase ( _lowercase ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_lowercase ):
ftp_get('ftp://huggingface.co', temp_file=_lowercase )
with pytest.raises(_lowercase ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', _lowercase )
def __UpperCamelCase ( _lowercase ) -> List[str]:
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_lowercase ):
fsspec_get('s3://huggingface.co', temp_file=_lowercase )
with pytest.raises(_lowercase ):
fsspec_head('s3://huggingface.co' )
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
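# --- illustrative sketch (not part of the script above) ---
# Each line of --correct_filename is expected to follow the layout
# `file;class;test;correct_line` split above; the paths here are hypothetical.
record = "tests/models/bert/test_modeling_bert.py;BertModelTest;test_x;expected = torch.tensor([1.0])"
file, class_name, test_name, correct_line = record.split(";")
assert class_name == "BertModelTest" and correct_line.split()[0] == "expected"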
| 4 | 0 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __UpperCamelCase ( _lowercase, _lowercase ) -> Tuple:
# Load checkpoint
_lowercase : str = torch.load(_lowercase, map_location='cpu' )
_lowercase : Union[str, Any] = chkpt['model']
# We have the base model one level deeper than the original XLM repository
_lowercase : Union[str, Any] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_lowercase : int = v
else:
_lowercase : Tuple = v
_lowercase : Tuple = chkpt['params']
_lowercase : Any = {n: v for n, v in config.items() if not isinstance(_lowercase, (torch.FloatTensor, numpy.ndarray) )}
_lowercase : int = chkpt['dico_word2id']
_lowercase : Any = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@', '' ): i for s, i in vocab.items()}
# Save pytorch-model
_lowercase : int = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
_lowercase : Union[str, Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
_lowercase : str = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(_lowercase, _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase, indent=2 ) + '\n' )
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(_lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(_lowercase, indent=2 ) + '\n' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_A : str =parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
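# --- illustrative sketch (not part of the script above) ---
# The vocab rewrite above converts fastBPE conventions: tokens WITHOUT '@@'
# are word-final and receive a '</w>' suffix (except the first 14 special
# ids), while tokens WITH '@@' simply drop the continuation marker.
def convert_token(token: str, idx: int) -> str:
    return token + "</w>" if token.find("@@") == -1 and idx > 13 else token.replace("@@", "")

assert convert_token("hello", 20) == "hello</w>"  # word-final piece
assert convert_token("hel@@", 21) == "hel"        # continuation piece
assert convert_token("<s>", 0) == "<s>"           # special id kept as-is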
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
]
return result
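# --- illustrative sketch (not part of the pipeline above) ---
# Postprocessing is a softmax over the per-label logits followed by a sort;
# a minimal NumPy version with made-up numbers:
import numpy as np

logits = np.array([2.0, 0.5, -1.0])  # image-to-label similarity scores
probs = np.exp(logits - logits.max())
probs /= probs.sum()
result = sorted(zip(probs.tolist(), ["cat", "dog", "car"]), reverse=True)
# -> roughly [(0.79, 'cat'), (0.18, 'dog'), (0.04, 'car')]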
| 4 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = DanceDiffusionPipeline
A_ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
A_ = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
A_ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
A_ = False
A_ = False
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : List[str] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=UpperCamelCase_ , use_timestep_embedding=UpperCamelCase_ , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
_lowercase : List[str] = IPNDMScheduler()
_lowercase : Tuple = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any=0 ) -> List[Any]:
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('mps' ):
_lowercase : str = torch.manual_seed(UpperCamelCase_ )
else:
_lowercase : str = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
_lowercase : Optional[int] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : int = self.get_dummy_components()
_lowercase : Optional[int] = DanceDiffusionPipeline(**UpperCamelCase_ )
_lowercase : int = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : str = self.get_dummy_inputs(UpperCamelCase_ )
_lowercase : Optional[Any] = pipe(**UpperCamelCase_ )
_lowercase : Any = output.audios
_lowercase : Dict = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowercase : Union[str, Any] = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
_lowercase : int = torch_device
_lowercase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowercase : Dict = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : int = torch.manual_seed(0 )
_lowercase : Dict = pipe(generator=UpperCamelCase_ , num_inference_steps=100 , audio_length_in_s=4.0_96 )
_lowercase : Union[str, Any] = output.audios
_lowercase : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowercase : List[str] = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : int = torch_device
_lowercase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
_lowercase : Union[str, Any] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : Union[str, Any] = torch.manual_seed(0 )
_lowercase : List[str] = pipe(generator=UpperCamelCase_ , num_inference_steps=100 , audio_length_in_s=4.0_96 )
_lowercase : Any = output.audios
_lowercase : Any = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowercase : Dict = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
# one length string
_lowercase : Union[str, Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            _lowercase : Optional[Any] = cha + chb
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
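# --- worked example (not part of the script above) ---
# First-order entropy is H = -sum(p * log2(p)); for the text "abab" each
# symbol has p = 0.5, so H = 1 bit per character.
import math
from collections import Counter

counts = Counter("abab")
total = sum(counts.values())
h = -sum((c / total) * math.log2(c / total) for c in counts.values())
assert abs(h - 1.0) < 1e-9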
| 4 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : Tuple = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowercase : Tuple = 4
_lowercase : Union[str, Any] = 48
_lowercase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : Dict = [6, 6, 6, 6]
_lowercase : Optional[int] = 60
_lowercase : List[str] = [6, 6, 6, 6]
_lowercase : Dict = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : str = 4
_lowercase : str = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowercase : str = 1
_lowercase : Tuple = 1
_lowercase : Dict = 126
_lowercase : Optional[int] = 7
_lowercase : List[Any] = 255.0
_lowercase : Tuple = ''
return config
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
if "patch_embed.proj" in name and "layers" not in name:
_lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
_lowercase : Tuple = name.replace('layers', 'encoder.stages' )
if "residual_group.blocks" in name:
_lowercase : str = name.replace('residual_group.blocks', 'layers' )
if "attn.proj" in name:
_lowercase : str = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
_lowercase : List[Any] = name.replace('attn', 'attention.self' )
if "norm1" in name:
_lowercase : List[str] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
_lowercase : Tuple = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' )
if "q_bias" in name:
_lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' )
if "k_bias" in name:
_lowercase : str = name.replace('k_bias', 'key.bias' )
if "v_bias" in name:
_lowercase : int = name.replace('v_bias', 'value.bias' )
if "cpb_mlp" in name:
_lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' )
if name == "norm.weight":
_lowercase : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
_lowercase : List[Any] = 'layernorm.bias'
if "conv_first" in name:
_lowercase : Tuple = name.replace('conv_first', 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowercase : List[str] = name.replace('conv_last', 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
if "upsample.0" in name:
_lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' )
if "upsample.2" in name:
_lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' )
_lowercase : Optional[int] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' )
_lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' )
else:
pass
else:
_lowercase : Tuple = 'swin2sr.' + name
return name
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
for key in orig_state_dict.copy().keys():
_lowercase : int = orig_state_dict.pop(_lowercase )
if "qkv" in key:
_lowercase : Tuple = key.split('.' )
_lowercase : Optional[Any] = int(key_split[1] )
_lowercase : Any = int(key_split[4] )
_lowercase : Optional[Any] = config.embed_dim
if "weight" in key:
_lowercase : Optional[int] = val[:dim, :]
_lowercase : int = val[dim : dim * 2, :]
_lowercase : int = val[-dim:, :]
else:
_lowercase : Optional[Any] = val[:dim]
_lowercase : Tuple = val[dim : dim * 2]
_lowercase : List[str] = val[-dim:]
else:
_lowercase : List[Any] = val
return orig_state_dict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Optional[Any] = get_config(_lowercase )
_lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
_lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
_lowercase : Any = convert_state_dict(_lowercase, _lowercase )
_lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase )
if len(_lowercase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
_lowercase : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
_lowercase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
_lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowercase : Optional[int] = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 512, 512] )
_lowercase : Tuple = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
_lowercase : int = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
_lowercase : Dict = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : List[str] = torch.Size([1, 3, 512, 512] )
_lowercase : int = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 1024, 1024] )
_lowercase : Union[str, Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
print('Looks ok!' )
_lowercase : List[str] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_lowercase : int = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
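# --- illustrative sketch (not part of the script above) ---
# The qkv handling above splits one fused projection into three: for a fused
# weight of shape (3 * dim, dim), rows [0:dim] are Q, rows [dim:2*dim] are K,
# and the last dim rows are V. A minimal check with dim = 2:
import torch

dim = 2
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), fused)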
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_A : Optional[Any] =None
_A : List[str] =logging.get_logger(__name__)
_A : Optional[Any] ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_A : int ={
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
_A : Optional[int] ={
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
_A : int ='''▁'''
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ["""input_ids""", """attention_mask"""]
A_ = BarthezTokenizer
def __init__( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : List[str]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Union[str, Any]="<mask>" , **UpperCamelCase_ : int , ) -> str:
'''simple docstring'''
_lowercase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Optional[Any] = vocab_file
_lowercase : Optional[Any] = False if not self.vocab_file else True
def __UpperCAmelCase ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase : Optional[Any] = [self.cls_token_id]
_lowercase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_lowercase : List[str] = [self.sep_token_id]
_lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowercase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
return (out_vocab_file,)
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
| 4 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_A : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
_A : int ='''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
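# Maps a requested image size to the latent grid size: ceil-divide by scale_factor**2,
# then multiply back by scale_factor (e.g. 768 with scale_factor=8 -> 96x96 latents).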
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase=8 ) -> List[Any]:
_lowercase : str = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowercase : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : UNetaDConditionModel , UpperCamelCase_ : DDPMScheduler , UpperCamelCase_ : VQModel , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
_lowercase : Any = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
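        # draw fresh Gaussian noise unless the caller supplied latents; either way, scale by the scheduler's init_noise_sigma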
if latents is None:
_lowercase : str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_lowercase : Dict = latents.to(UpperCamelCase_ )
_lowercase : Optional[Any] = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_lowercase : List[str] = torch.device(F'''cuda:{gpu_id}''' )
_lowercase : Optional[int] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : List[str]=0 ) -> Union[str, Any]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_lowercase : int = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowercase : Union[str, Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowercase : Optional[int] = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
_lowercase : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self : Dict , UpperCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 512 , UpperCamelCase_ : int = 100 , UpperCamelCase_ : float = 4.0 , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : Optional[torch.FloatTensor] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = self._execution_device
_lowercase : List[str] = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Optional[int] = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Optional[Any] = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Tuple = torch.cat(UpperCamelCase_ , dim=0 )
_lowercase : Dict = image_embeds.shape[0] * num_images_per_prompt
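        # classifier-free guidance doubles the batch: unconditional embeddings first, conditional second, so one UNet pass covers both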
if do_classifier_free_guidance:
_lowercase : Optional[int] = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
_lowercase : List[str] = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
_lowercase : int = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
_lowercase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
_lowercase : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
_lowercase : Optional[Any] = self.scheduler.timesteps
_lowercase : List[Any] = self.movq.config.latent_channels
_lowercase : List[str] = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
_lowercase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_lowercase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowercase : Dict = {'image_embeds': image_embeds, 'hint': hint}
_lowercase : str = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
_lowercase : str = noise_pred.split(latents.shape[1] , dim=1 )
_lowercase : Optional[int] = noise_pred.chunk(2 )
_lowercase : Dict = variance_pred.chunk(2 )
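                # standard CFG combination: eps = eps_uncond + guidance_scale * (eps_text - eps_uncond); the learned variance comes from the text branch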
_lowercase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowercase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
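            # schedulers without a learned variance type expect only the epsilon channels, so split the variance half back off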
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowercase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : int = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
_lowercase : Tuple = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_lowercase : Any = image * 0.5 + 0.5
_lowercase : Optional[int] = image.clamp(0 , 1 )
_lowercase : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowercase : Any = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
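        # collect (id, token) pairs whose single-id decode round-trips, keep only plain-ASCII tokens,
        # clamp the sequence length to [min_length, max_length], then re-encode the decoded text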
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_A : Tuple =logging.get_logger(__name__)
def __UpperCamelCase ( _lowercase ) -> int:
_lowercase : Optional[Any] = 'huggingface/label-files'
_lowercase : Tuple = 'imagenet-1k-id2label.json'
_lowercase : int = json.load(open(hf_hub_download(_lowercase, _lowercase, repo_type='dataset' ), 'r' ) )
    _lowercase : Dict = {int(k ): v for k, v in idalabel.items()}
_lowercase : int = {v: k for k, v in idalabel.items()}
_lowercase : str = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_lowercase : Dict = BitConfig(
conv_layer=_lowercase, num_labels=1000, idalabel=_lowercase, labelaid=_lowercase, )
return config
def __UpperCamelCase ( _lowercase ) -> Tuple:
if "stem.conv" in name:
_lowercase : Optional[Any] = name.replace('stem.conv', 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase : int = name.replace('blocks', 'layers' )
if "head.fc" in name:
_lowercase : Dict = name.replace('head.fc', 'classifier.1' )
if name.startswith('norm' ):
_lowercase : Union[str, Any] = 'bit.' + name
if "bit" not in name and "classifier" not in name:
_lowercase : int = 'bit.encoder.' + name
return name
def __UpperCamelCase ( ) -> List[str]:
_lowercase : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Optional[int] = Image.open(requests.get(_lowercase, stream=_lowercase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase=False ) -> List[Any]:
_lowercase : str = get_config(_lowercase )
# load original model from timm
_lowercase : List[str] = create_model(_lowercase, pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
_lowercase : Union[str, Any] = timm_model.state_dict()
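    # rename timm keys to the HF layout; timm stores the head as a 1x1 conv, so its weights are squeezed to 2D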
for key in state_dict.copy().keys():
_lowercase : Union[str, Any] = state_dict.pop(_lowercase )
_lowercase : Union[str, Any] = val.squeeze() if 'head' in key else val
# load HuggingFace model
_lowercase : Any = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
_lowercase : int = create_transform(**resolve_data_config({}, model=_lowercase ) )
_lowercase : Optional[Any] = transform.transforms
_lowercase : Optional[int] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
_lowercase : Union[str, Any] = BitImageProcessor(
do_resize=_lowercase, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=_lowercase, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=_lowercase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
_lowercase : Any = prepare_img()
_lowercase : str = transform(_lowercase ).unsqueeze(0 )
_lowercase : Union[str, Any] = processor(_lowercase, return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase, _lowercase )
# verify logits
with torch.no_grad():
_lowercase : List[Any] = model(_lowercase )
_lowercase : Union[str, Any] = outputs.logits
print('Logits:', logits[0, :3] )
print('Predicted class:', model.config.idalabel[logits.argmax(-1 ).item()] )
_lowercase : str = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase, outputs.logits, atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_A : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
_A : str =parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 714 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = 0
A_ = False
A_ = 3.0
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=UpperCamelCase_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def __UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
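        # the handler should forward init_scale / growth_factor to the torch.cuda.amp.GradScaler that Accelerator builds for fp16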
_lowercase : int = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowercase : List[str] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , UpperCamelCase_ )
@require_multi_gpu
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : List[Any] = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
_A : List[Any] =DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
_A : str =Accelerator(kwargs_handlers=[ddp_scaler])
_A : str =torch.nn.Linear(1_0_0, 2_0_0)
_A : List[str] =accelerator.prepare(model)
# Check the values changed in kwargs
_A : List[str] =''''''
_A : Dict =model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( _lowercase ) -> Tuple:
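    # entropy of softmax(x) along dim 1: with A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i), H = log(A) - B / A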
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowercase : Optional[Any] = x
else:
_lowercase : Optional[int] = x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
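        # copy the trained BertPooler weights into every highway exit's pooler so the exits start from the main pooler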
_lowercase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
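                # inference-time early exit: once a highway's prediction entropy drops below this layer's threshold,
                # stop computing further layers and surface the result via HighwayException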
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = message
_lowercase : Dict = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : Dict = config.num_labels
_lowercase : Any = config.num_hidden_layers
_lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
_lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
_lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
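            # when training the highways, optimize the summed losses of all intermediate exits (the final layer is trained by the main loss)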
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_A : int ='''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_A : Tuple ='''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_A : Dict =r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
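        # accuracy = fraction of predictions judged math-equivalent to their reference after canonicalization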
_lowercase : List[str] = 0.0
for i, j in zip(UpperCamelCase_ , UpperCamelCase_ ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCamelCase_ , UpperCamelCase_ ) else 0.0
_lowercase : int = n_correct / len(UpperCamelCase_ )
return {
"accuracy": accuracy,
}
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = [10, 20, 30, 40, 50, 60]
_lowercase : Tuple = [2, 4, 6, 8, 10, 12]
_lowercase : Optional[Any] = 100
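        # every item has profit/weight ratio 5 and the total weight is 42 <= 100, so all items are taken: 10+20+30+40+50+60 = 210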
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 0 |
'''simple docstring'''
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_A : Any ='''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
_A : List[Any] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_A : Any =dict(zip(vocab, range(len(vocab))))
_A : Optional[Any] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
_A : Optional[Any] =Path(tmpdirname)
_A : Union[str, Any] =build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
_A : Tuple =build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
_A : Union[str, Any] =build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
_A : List[str] =FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
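# deliberately tiny dimensions (d_model=4, one layer per side) so the resulting checkpoint stays small enough for tests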
_A : List[str] =FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
_A : Optional[Any] =FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
_A : Optional[int] =tokenizer(['''Making tiny model'''], return_tensors='''pt''')
_A : List[Any] =tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
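# lazy import structure: framework-specific symbols are only materialized on first access, so a missing backend doesn't break `import transformers`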
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple =['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =[
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( A , A , unittest.TestCase ):
'''simple docstring'''
A_ = IFInpaintingPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __UpperCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
return self._get_dummy_components()
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : str=0 ) -> Dict:
'''simple docstring'''
if str(UpperCamelCase_ ).startswith('mps' ):
_lowercase : List[str] = torch.manual_seed(UpperCamelCase_ )
else:
_lowercase : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
_lowercase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
_lowercase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
_lowercase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
self._test_save_load_local()
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """markuplm"""
def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
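        # the xpath tag/subscript unit embeddings encode each token's position in the HTML/XML DOM tree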
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
| 4 | 0 |
from typing import Any
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : str , UpperCamelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = data
_lowercase : Union[str, Any] = None
def __repr__( self : List[str] ) -> str:
'''simple docstring'''
return F'''Node({self.data})'''
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_lowercase : str = None
def __iter__( self : List[Any] ) -> Any:
'''simple docstring'''
_lowercase : Dict = self.head
while node:
yield node.data
_lowercase : str = node.next
def __len__( self : str ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : List[str] ) -> str:
'''simple docstring'''
return "->".join([str(UpperCamelCase_ ) for item in self] )
def __getitem__( self : Union[str, Any] , UpperCamelCase_ : int ) -> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Any ) -> None:
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
_lowercase : Dict = self.head
for _ in range(UpperCamelCase_ ):
_lowercase : List[str] = current.next
_lowercase : Optional[int] = data
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Any ) -> None:
'''simple docstring'''
self.insert_nth(len(self ) , UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Any ) -> None:
'''simple docstring'''
self.insert_nth(0 , UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Any ) -> None:
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
_lowercase : List[str] = Node(UpperCamelCase_ )
if self.head is None:
_lowercase : List[Any] = new_node
elif index == 0:
_lowercase : Dict = self.head # link new_node to head
_lowercase : List[Any] = new_node
else:
_lowercase : Tuple = self.head
for _ in range(index - 1 ):
_lowercase : Tuple = temp.next
_lowercase : Union[str, Any] = temp.next
_lowercase : List[Any] = new_node
def __UpperCAmelCase ( self : Optional[int] ) -> None: # print every node data
'''simple docstring'''
print(self )
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
return self.delete_nth(0 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any: # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : int = 0 ) -> Any:
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
_lowercase : List[Any] = self.head # default first node
if index == 0:
_lowercase : Union[str, Any] = self.head.next
else:
_lowercase : Dict = self.head
for _ in range(index - 1 ):
_lowercase : str = temp.next
_lowercase : Any = temp.next
_lowercase : Dict = temp.next.next
return delete_node.data
def __UpperCAmelCase ( self : Tuple ) -> bool:
'''simple docstring'''
return self.head is None
def __UpperCAmelCase ( self : Optional[int] ) -> None:
'''simple docstring'''
_lowercase : List[str] = None
_lowercase : Union[str, Any] = self.head
while current:
# Store the current node's next node.
_lowercase : List[Any] = current.next
# Make the current node's next point backwards
_lowercase : List[Any] = prev
# Make the previous node be the current node
_lowercase : Optional[Any] = current
# Make the current node the next node (to progress iteration)
_lowercase : Optional[Any] = next_node
# Return prev in order to put the head at the end
_lowercase : Any = prev
def __UpperCamelCase ( ) -> None:
_lowercase : Optional[Any] = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowercase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowercase ) == i
linked_list.insert_nth(_lowercase, i + 1 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1, 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(0, 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowercase ) == 9
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(1, 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0, 9 ) ) is True
for i in range(0, 9 ):
_lowercase : int = -i
assert all(linked_list[i] == -i for i in range(0, 9 ) ) is True
linked_list.reverse()
assert str(_lowercase ) == "->".join(str(_lowercase ) for i in range(-8, 1 ) )
def __UpperCamelCase ( ) -> None:
_lowercase : List[Any] = [
-9,
100,
Node(7734_5112 ),
'dlrow olleH',
7,
5555,
0,
-192.5_5555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
_lowercase : int = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowercase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowercase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_lowercase : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_lowercase : int = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_lowercase : Dict = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowercase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowercase )
assert (
str(_lowercase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowercase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCamelCase ( ) -> Dict:
from doctest import testmod
testmod()
_lowercase : Optional[int] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_lowercase )
print('\nReading/changing Node data using indexing:' )
print(f'''Element at Position 1: {linked_list[1]}''' )
_lowercase : List[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(_lowercase )
print(f'''length of linked_list is : {len(_lowercase )}''' )
if __name__ == "__main__":
main()
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : Tuple = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowercase : Tuple = 4
_lowercase : Union[str, Any] = 48
_lowercase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : Dict = [6, 6, 6, 6]
_lowercase : Optional[int] = 60
_lowercase : List[str] = [6, 6, 6, 6]
_lowercase : Dict = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : str = 4
_lowercase : str = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowercase : str = 1
_lowercase : Tuple = 1
_lowercase : Dict = 126
_lowercase : Optional[int] = 7
_lowercase : List[Any] = 2_5_5.0
_lowercase : Tuple = ''
return config
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
if "patch_embed.proj" in name and "layers" not in name:
_lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
_lowercase : Tuple = name.replace('layers', 'encoder.stages' )
if "residual_group.blocks" in name:
_lowercase : str = name.replace('residual_group.blocks', 'layers' )
if "attn.proj" in name:
_lowercase : str = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
_lowercase : List[Any] = name.replace('attn', 'attention.self' )
if "norm1" in name:
_lowercase : List[str] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
_lowercase : Tuple = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' )
if "q_bias" in name:
_lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' )
if "k_bias" in name:
_lowercase : str = name.replace('k_bias', 'key.bias' )
if "v_bias" in name:
_lowercase : int = name.replace('v_bias', 'value.bias' )
if "cpb_mlp" in name:
_lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' )
if name == "norm.weight":
_lowercase : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
_lowercase : List[Any] = 'layernorm.bias'
if "conv_first" in name:
_lowercase : Tuple = name.replace('conv_first', 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowercase : List[str] = name.replace('conv_last', 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
if "upsample.0" in name:
_lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' )
if "upsample.2" in name:
_lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' )
_lowercase : Optional[int] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' )
_lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' )
else:
pass
else:
_lowercase : Tuple = 'swin2sr.' + name
return name
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
for key in orig_state_dict.copy().keys():
_lowercase : int = orig_state_dict.pop(_lowercase )
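        # the original checkpoint stores the attention q, k and v projections as one fused "qkv" tensor; split it below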
if "qkv" in key:
_lowercase : Tuple = key.split('.' )
_lowercase : Optional[Any] = int(key_split[1] )
_lowercase : Any = int(key_split[4] )
_lowercase : Optional[Any] = config.embed_dim
if "weight" in key:
_lowercase : Optional[int] = val[:dim, :]
_lowercase : int = val[dim : dim * 2, :]
_lowercase : int = val[-dim:, :]
else:
_lowercase : Optional[Any] = val[:dim]
_lowercase : Tuple = val[dim : dim * 2]
_lowercase : List[str] = val[-dim:]
pass
else:
_lowercase : List[Any] = val
return orig_state_dict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Optional[Any] = get_config(_lowercase )
_lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
_lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
_lowercase : Any = convert_state_dict(_lowercase, _lowercase )
_lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase )
if len(_lowercase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
_lowercase : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
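    # preprocessing uses torchvision transforms (resize + to-tensor + normalize) instead of the processor call above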
_lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
_lowercase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
_lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowercase : Optional[int] = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 512, 512] )
_lowercase : Tuple = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
_lowercase : int = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
_lowercase : Dict = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : List[str] = torch.Size([1, 3, 512, 512] )
_lowercase : int = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 1024, 1024] )
_lowercase : Union[str, Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
print('Looks ok!' )
_lowercase : List[str] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_lowercase : int = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = [[1, 2, 4], [1, 2, 3, 4]]
_lowercase : Tuple = DisjunctiveConstraint(UpperCamelCase_ )
self.assertTrue(isinstance(dc.token_ids , UpperCamelCase_ ) )
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint(UpperCamelCase_ ) # fails here
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : List[Any] = [[1, 2, 3], [1, 2, 4]]
_lowercase : Optional[int] = DisjunctiveConstraint(UpperCamelCase_ )
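        # update() reports (stepped, completed, reset): the token advanced the trie / finished one branch / forced a reset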
_lowercase : Any = dc.update(1 )
_lowercase : Dict = stepped is True and completed is False and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowercase : Dict = dc.update(2 )
_lowercase : int = stepped is True and completed is False and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowercase : Optional[int] = dc.update(3 )
_lowercase : str = stepped is True and completed is True and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
_lowercase : str = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_lowercase : Union[str, Any] = DisjunctiveConstraint(UpperCamelCase_ )
_lowercase : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowercase : List[str] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowercase : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_lowercase : List[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_lowercase : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_lowercase : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_lowercase : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 720 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> list:
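    """Greedy full text justification: pack as many words as fit into each line of
    width max_width, distribute leftover spaces round-robin from the left, and
    left-justify the final line."""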
_lowercase : List[str] = word.split()
def justify(_lowercase, _lowercase, _lowercase ) -> str:
_lowercase : Dict = max_width - width
_lowercase : Tuple = len(_lowercase )
if len(_lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_lowercase : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_lowercase : str = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_lowercase : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_lowercase ):
num_spaces_between_words_list[i] += 1
_lowercase : Union[str, Any] = []
for i in range(_lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowercase )
_lowercase : str = []
_lowercase : list[str] = []
_lowercase : Union[str, Any] = 0
for word in words:
if width + len(_lowercase ) + len(_lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowercase )
width += len(_lowercase )
else:
# justify the line and add it to result
answer.append(justify(_lowercase, _lowercase, _lowercase ) )
# reset new line and new width
_lowercase , _lowercase : Optional[Any] = [word], len(_lowercase )
_lowercase : Optional[int] = max_width - width - len(_lowercase )
answer.append(' '.join(_lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 4 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_A : int ='''examples/'''
_A : Optional[int] ={
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_A : Tuple ={
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
_A : Union[str, Any] ='''README.md'''
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Dict:
with open(_lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
_lowercase : Optional[int] = f.read()
_lowercase : List[str] = REPLACE_PATTERNS[pattern]
_lowercase : Optional[int] = replace.replace('VERSION', _lowercase )
_lowercase : Optional[Any] = re_pattern.sub(_lowercase, _lowercase )
with open(_lowercase, 'w', encoding='utf-8', newline='\n' ) as f:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase ) -> Tuple:
for folder, directories, fnames in os.walk(_lowercase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_lowercase, _lowercase ), _lowercase, pattern='examples' )
def __UpperCamelCase ( _lowercase, _lowercase=False ) -> Dict:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowercase, _lowercase, _lowercase )
if not patch:
update_version_in_examples(_lowercase )
def __UpperCamelCase ( ) -> str:
_lowercase : Union[str, Any] = '🤗 Transformers currently provides the following architectures'
_lowercase : List[str] = '1. Want to contribute a new model?'
with open(_lowercase, 'r', encoding='utf-8', newline='\n' ) as f:
_lowercase : Dict = f.readlines()
# Find the start of the list.
_lowercase : List[str] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_lowercase : Union[str, Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
_lowercase : Optional[Any] = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc', 'https://huggingface.co/docs/transformers/model_doc', )
index += 1
with open(_lowercase, 'w', encoding='utf-8', newline='\n' ) as f:
f.writelines(_lowercase )
def __UpperCamelCase ( ) -> str:
with open(REPLACE_FILES['init'], 'r' ) as f:
_lowercase : Optional[int] = f.read()
_lowercase : List[Any] = REPLACE_PATTERNS['init'][0].search(_lowercase ).groups()[0]
return packaging.version.parse(_lowercase )
def __UpperCamelCase ( _lowercase=False ) -> Tuple:
_lowercase : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
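    # a dev version (x.y.z.dev0) releases as x.y.z; otherwise bump the patch or the minor number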
if default_version.is_devrelease:
_lowercase : int = default_version.base_version
elif patch:
_lowercase : Optional[int] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
_lowercase : Optional[int] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
_lowercase : Any = input(f'''Which version are you releasing? [{default_version}]''' )
if len(_lowercase ) == 0:
_lowercase : Optional[int] = default_version
print(f'''Updating version to {version}.''' )
global_version_update(_lowercase, patch=_lowercase )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def __UpperCamelCase ( ) -> List[Any]:
_lowercase : Dict = get_version()
_lowercase : Tuple = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
_lowercase : Optional[Any] = current_version.base_version
# Check with the user we got that right.
_lowercase : str = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(_lowercase ) == 0:
_lowercase : int = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(_lowercase )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
_A : Tuple =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
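# md_prefix: depth 0 renders a "##" markdown heading, deeper levels an indented "*" bullet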
def __UpperCamelCase ( _lowercase ) -> List[str]:
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
| 4 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
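        # ByT5 ids are raw UTF-8 byte values offset by 3 (pad/eos/unk specials); the trailing 1 is </s>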
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import math
def __UpperCamelCase ( _lowercase, _lowercase ) -> int:
    """Jump search on a sorted array: step forward in blocks of ~sqrt(n) until the
    block that may contain x is found, then scan that block linearly.
    Returns the index of x, or -1 if x is not present."""
_lowercase : Dict = len(_lowercase )
_lowercase : Any = int(math.floor(math.sqrt(_lowercase ) ) )
_lowercase : int = 0
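    # block phase: jump ahead ~sqrt(n) elements at a time until the block end reaches or passes x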
while arr[min(_lowercase, _lowercase ) - 1] < x:
_lowercase : List[str] = step
step += int(math.floor(math.sqrt(_lowercase ) ) )
if prev >= n:
return -1
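    # linear phase: scan forward inside the identified block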
while arr[prev] < x:
_lowercase : int = prev + 1
if prev == min(_lowercase, _lowercase ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
_A : str =input('''Enter numbers separated by a comma:\n''').strip()
_A : int =[int(item) for item in user_input.split(''',''')]
_A : str =int(input('''Enter the number to be searched:\n'''))
_A : Optional[Any] =jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F'''Number {x} is at index {res}''')
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
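        # spot-check a 3x3 slice of the logits against precomputed reference values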
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = (DDIMParallelScheduler,)
A_ = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def __UpperCAmelCase ( self : Any , **UpperCamelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Tuple = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**UpperCamelCase_ )
return config
def __UpperCAmelCase ( self : List[str] , **UpperCamelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
_lowercase : List[str] = self.scheduler_classes[0]
_lowercase : List[str] = self.get_scheduler_config(**UpperCamelCase_ )
_lowercase : Optional[Any] = scheduler_class(**UpperCamelCase_ )
        _lowercase , _lowercase : Dict = 10, 0.0
_lowercase : Any = self.dummy_model()
_lowercase : Any = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_ )
for t in scheduler.timesteps:
_lowercase : List[Any] = model(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
return sample
def __UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_ )
_lowercase : List[str] = self.scheduler_classes[0]
_lowercase : List[Any] = self.get_scheduler_config(steps_offset=1 )
_lowercase : str = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def __UpperCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=UpperCamelCase_ , eta=UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Optional[Any] = self.scheduler_classes[0]
_lowercase : List[Any] = self.get_scheduler_config()
_lowercase : str = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def __UpperCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
_lowercase : Any = self.scheduler_classes[0]
_lowercase : Optional[Any] = self.get_scheduler_config()
_lowercase : List[Any] = scheduler_class(**UpperCamelCase_ )
        _lowercase , _lowercase : Tuple = 10, 0.0
scheduler.set_timesteps(UpperCamelCase_ )
_lowercase : List[str] = self.dummy_model()
_lowercase : List[str] = self.dummy_sample_deter
_lowercase : Optional[Any] = self.dummy_sample_deter + 0.1
_lowercase : str = self.dummy_sample_deter - 0.1
_lowercase : str = samplea.shape[0]
_lowercase : Tuple = torch.stack([samplea, samplea, samplea] , dim=0 )
_lowercase : str = torch.arange(UpperCamelCase_ )[0:3, None].repeat(1 , UpperCamelCase_ )
_lowercase : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_lowercase : List[str] = scheduler.batch_step_no_noise(UpperCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase_ )
_lowercase : Dict = torch.sum(torch.abs(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : Any = self.full_loop()
_lowercase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase_ ) )
_lowercase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
_lowercase : int = self.full_loop(prediction_type='v_prediction' )
_lowercase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
_lowercase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.06_84 ) < 1E-3
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : List[Any] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 )
_lowercase : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.19_51 ) < 1E-3
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Optional[int] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.01 )
_lowercase : str = torch.sum(torch.abs(UpperCamelCase_ ) )
_lowercase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.19_41 ) < 1E-3
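# Minimal usage sketch for DDIMParallelScheduler (illustration only, not part
# of the tests above); `toy_model` is a hypothetical stand-in for a trained
# epsilon-predicting network.
def _ddim_parallel_demo() -> torch.Tensor:
    scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule='linear')
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    toy_model = lambda x, t: torch.zeros_like(x)  # pretend noise prediction
    for t in scheduler.timesteps:
        residual = toy_model(sample, t)
        sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample
    return sample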
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
                _lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 accounts for the extra frame produced by the spectrogram computation
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
        # "repeat" and "repeatpad" tile the waveform before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
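# Usage sketch following the upstream ClapFeatureExtractor call convention
# that this class mirrors: pass a mono waveform at the configured sampling
# rate and read back fused log-mel features plus the per-example `is_longer`
# flags. The one-second 440 Hz sine is an illustrative stand-in for audio.
def _feature_extractor_demo(extractor) -> None:
    waveform = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 4_8000))
    features = extractor(waveform, sampling_rate=4_8000, return_tensors='np')
    print(features['input_features'].shape, features['is_longer'])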
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : int=2 , UpperCamelCase_ : List[Any]=8 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : int=True , UpperCamelCase_ : Any=99 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Optional[int]=36 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Tuple=0.0 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : Any = parent
_lowercase : Tuple = batch_size
_lowercase : Dict = seq_length
_lowercase : str = is_training
_lowercase : Tuple = use_input_mask
_lowercase : int = use_token_type_ids
_lowercase : str = use_labels
_lowercase : Any = vocab_size
_lowercase : Tuple = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : int = hidden_act
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Any = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : Optional[Any] = type_vocab_size
_lowercase : Dict = type_sequence_label_size
_lowercase : Tuple = initializer_range
_lowercase : str = num_labels
_lowercase : str = num_choices
_lowercase : Optional[int] = scope
def __UpperCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
_lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_input_mask:
_lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Tuple = None
_lowercase : Any = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : List[str] = self.get_config()
_lowercase : Optional[int] = 300
return config
def __UpperCAmelCase ( self : Any ) -> Any:
'''simple docstring'''
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Any = self.prepare_config_and_inputs()
_lowercase : Union[str, Any] = True
_lowercase : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Any = MraModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
_lowercase : Optional[int] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
_lowercase : Tuple = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = True
_lowercase : Union[str, Any] = MraModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
_lowercase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : int = MraForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
_lowercase : Union[str, Any] = MraForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_labels
_lowercase : Dict = MraForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int ) -> Optional[int]:
'''simple docstring'''
_lowercase : Dict = self.num_labels
_lowercase : int = MraForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = self.num_choices
_lowercase : int = MraForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : Tuple = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_lowercase : int = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = config_and_inputs
_lowercase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = ()
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
_lowercase : Tuple = MraModelTester(self )
_lowercase : Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def __UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase : Optional[int] = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> str:
'''simple docstring'''
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> str:
'''simple docstring'''
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
@slow
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = MraModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip(reason='MRA does not output attentions' )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
return
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
_lowercase : Optional[Any] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : str = model(UpperCamelCase_ )[0]
_lowercase : str = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : List[Any] = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
_lowercase : Optional[int] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
_lowercase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : Union[str, Any] = model(UpperCamelCase_ )[0]
_lowercase : int = 5_0265
_lowercase : Optional[int] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : Dict = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Union[str, Any] = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
_lowercase : Optional[int] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : List[str] = model(UpperCamelCase_ )[0]
_lowercase : Optional[Any] = 5_0265
_lowercase : Dict = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def __UpperCamelCase ( _lowercase ) -> dict:
_lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_lowercase ).json()
def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]:
_lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
_lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories]
return [get_hackernews_story(_lowercase ) for story_id in story_ids]
def __UpperCamelCase ( _lowercase = 10 ) -> str:
_lowercase : Tuple = hackernews_top_stories(_lowercase )
return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
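# Illustrative output of hackernews_top_stories_as_markdown (title and URL are
# made up); each bullet interpolates a story's "title" and "url" fields:
# * [Show HN: An example story](https://example.com)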
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def __UpperCamelCase ( _lowercase, _lowercase ) -> float:
_lowercase : str = u
for i in range(1, _lowercase ):
_lowercase : str = temp * (u - i)
return temp
def __UpperCamelCase ( ) -> None:
_lowercase : Any = int(input('enter the numbers of values: ' ) )
_lowercase : list[list[float]] = []
for _ in range(_lowercase ):
y.append([] )
for i in range(_lowercase ):
for j in range(_lowercase ):
y[i].append(_lowercase )
_lowercase : List[str] = 0
print('enter the values of parameters in a list: ' )
_lowercase : Dict = list(map(_lowercase, input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(_lowercase ):
_lowercase : Union[str, Any] = float(input() )
_lowercase : str = int(input('enter the value to interpolate: ' ) )
_lowercase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, _lowercase ):
for j in range(n - i ):
_lowercase : Tuple = y[j + 1][i - 1] - y[j][i - 1]
_lowercase : List[str] = y[0][0]
for i in range(1, _lowercase ):
summ += (ucal(_lowercase, _lowercase ) * y[0][i]) / math.factorial(_lowercase )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
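# Non-interactive sketch of the same forward-difference scheme (assumes the
# helper above is bound as `ucal`, matching the call in main()): tabulate
# y = x**2 + 1 on x = 0..3 and interpolate at 1.5.
def _newton_forward_demo() -> float:
    x = [0.0, 1.0, 2.0, 3.0]
    y = [[1.0], [2.0], [5.0], [10.0]]  # first column holds f(x)
    n = len(x)
    u = (1.5 - x[0]) / (x[1] - x[0])
    # build the forward-difference table column by column
    for i in range(1, n):
        for j in range(n - i):
            y[j].append(y[j + 1][i - 1] - y[j][i - 1])
    total = y[0][0]
    for i in range(1, n):
        total += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return total  # 3.25, matching 1.5 ** 2 + 1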
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
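# Minimal instantiation sketch via the upstream equivalent of this class
# (MegatronBertConfig in transformers); the defaults above match the published
# Megatron-BERT-345M geometry, and any field can be overridden:
# from transformers import MegatronBertConfig
# config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)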
| 4 | 0 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> float:
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowercase ) * abs(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
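# Worked example (the function above is kinetic_energy in the upstream
# source): 0.5 * 10 * |-5| * |-5| = 125.0 J, so the velocity's sign is
# irrelevant to the result.
def _kinetic_energy_demo() -> float:
    return __UpperCamelCase(10, -5)  # 125.0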
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
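# Illustrative invocation (script and checkpoint paths are placeholders):
# python bertarize.py \
#     --pruning_method sigmoied_threshold \
#     --threshold 0.1 \
#     --model_name_or_path ./serialization_dir/fine_pruned_model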
| 4 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __UpperCamelCase ( ) -> Any:
_lowercase : int = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores', type=_lowercase, default=1, help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script', type=_lowercase, help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
), )
# rest from the training program
parser.add_argument('training_script_args', nargs=_lowercase )
return parser.parse_args()
def __UpperCamelCase ( ) -> Optional[Any]:
_lowercase : str = parse_args()
# Import training_script as a module.
_lowercase : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowercase : Union[str, Any] = script_fpath.stem
_lowercase : Optional[Any] = importlib.import_module(_lowercase )
# Patch sys.argv
_lowercase : Tuple = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
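# Illustrative invocation (script names are placeholders): spawn one process
# per TPU core and forward everything after the script path to it.
# python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased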
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
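# Round-trip sketch (upstream names: base64_encode / base64_decode; note that
# the second def above shadows the first at module scope as written):
# base64_encode(b'Hello, World!') == b'SGVsbG8sIFdvcmxkIQ=='
# base64_decode(b'SGVsbG8sIFdvcmxkIQ==') == b'Hello, World!'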
| 4 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> float:
_lowercase : Union[str, Any] = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_lowercase )] )
_lowercase : List[str] = np.array(_lowercase )
_lowercase : Dict = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), _lowercase ) ), x.transpose() ), _lowercase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> float:
_lowercase : List[str] = (1, 2, 1)
_lowercase : Dict = (1, 1, 0, 7)
_lowercase : Union[str, Any] = SARIMAX(
_lowercase, exog=_lowercase, order=_lowercase, seasonal_order=_lowercase )
_lowercase : str = model.fit(disp=_lowercase, maxiter=600, method='nm' )
_lowercase : int = model_fit.predict(1, len(_lowercase ), exog=[test_match] )
return result[0]
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> float:
_lowercase : Any = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1 )
regressor.fit(_lowercase, _lowercase )
_lowercase : int = regressor.predict(_lowercase )
return y_pred[0]
def __UpperCamelCase ( _lowercase ) -> float:
train_user.sort()
_lowercase : str = np.percentile(_lowercase, 25 )
_lowercase : Union[str, Any] = np.percentile(_lowercase, 75 )
_lowercase : Optional[int] = qa - qa
_lowercase : Optional[Any] = qa - (iqr * 0.1)
return low_lim
def __UpperCamelCase ( _lowercase, _lowercase ) -> bool:
_lowercase : Any = 0
_lowercase : List[str] = 0
for i in list_vote:
if i > actual_result:
_lowercase : Optional[Any] = not_safe + 1
else:
if abs(abs(_lowercase ) - abs(_lowercase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_A : int =[[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
_A : List[str] =pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
_A : Union[str, Any] =Normalizer().fit_transform(data_input_df.values)
# split data
_A : Any =normalize_df[:, 2].tolist()
_A : Dict =normalize_df[:, 0].tolist()
_A : List[Any] =normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_A : Optional[Any] =normalize_df[:, [1, 2]].tolist()
_A : str =x[: len(x) - 1]
_A : str =x[len(x) - 1 :]
# for linear regression & sarimax
_A : str =total_date[: len(total_date) - 1]
_A : Tuple =total_user[: len(total_user) - 1]
_A : Union[str, Any] =total_match[: len(total_match) - 1]
_A : Optional[Any] =total_date[len(total_date) - 1 :]
_A : Optional[int] =total_user[len(total_user) - 1 :]
_A : Optional[Any] =total_match[len(total_match) - 1 :]
# voting system with forecasting
_A : Dict =[
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_A : Any ='''''' if data_safety_checker(res_vote, tst_user) else '''not '''
    print(F'''Today\'s data is {not_str}safe.''')
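# Worked example for data_safety_checker (illustrative numbers): against an
# actual value of 20.0, two of the three forecasts land within the 0.1
# tolerance and one overshoots, so the majority vote passes.
def _safety_demo() -> bool:
    return data_safety_checker([19.95, 20.0, 23.0], 20.0)  # True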
| 707 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
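# Worked example: 196 is the classic Lychrel candidate; sum_reverse(196) gives
# 196 + 691 = 887, which is not a palindrome, and no palindrome appears within
# the 50-iteration budget, so 196 is counted. For the default limit of 10000
# this is Project Euler 55, whose answer is 249.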
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=13 , UpperCamelCase_ : str=7 , UpperCamelCase_ : int=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=99 , UpperCamelCase_ : Union[str, Any]=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=16 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Any=None , ) -> List[str]:
'''simple docstring'''
_lowercase : Any = parent
_lowercase : Tuple = batch_size
_lowercase : List[Any] = seq_length
_lowercase : Any = is_training
_lowercase : List[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : Dict = vocab_size
_lowercase : Dict = hidden_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : str = intermediate_size
_lowercase : List[Any] = hidden_act
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Any = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Union[str, Any] = type_sequence_label_size
_lowercase : Optional[int] = initializer_range
_lowercase : str = num_labels
_lowercase : Tuple = num_choices
_lowercase : str = scope
_lowercase : List[str] = self.vocab_size - 1
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Optional[Any] = None
_lowercase : List[str] = None
_lowercase : Union[str, Any] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : int = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : List[str] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_lowercase : Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict , *UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[int] = OpenAIGPTModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : str = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
_lowercase : List[str] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
_lowercase : List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , *UpperCamelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Tuple = OpenAIGPTLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Tuple = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str , *UpperCamelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
_lowercase : int = OpenAIGPTDoubleHeadsModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Any = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , *UpperCamelCase_ : List[str] ) -> Any:
'''simple docstring'''
_lowercase : Dict = self.num_labels
_lowercase : Optional[int] = OpenAIGPTForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Any = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Tuple = config_and_inputs
_lowercase : str = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( A , A , A , unittest.TestCase ):
'''simple docstring'''
A_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
A_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
A_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=False ) -> List[Any]:
'''simple docstring'''
_lowercase : List[str] = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
_lowercase : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase_ , )
_lowercase : Optional[int] = inputs_dict['labels']
_lowercase : Optional[Any] = inputs_dict['labels']
_lowercase : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase_ , )
_lowercase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def __UpperCAmelCase ( self : List[str] ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = OpenAIGPTModelTester(self )
_lowercase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase_ )
@slow
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[int] = OpenAIGPTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
_lowercase : Any = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(UpperCamelCase_ )
_lowercase : Optional[int] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCamelCase_ ) # the president is
_lowercase : Optional[int] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
_lowercase : Optional[Any] = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
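# Equivalent generation sketch outside the test harness (OpenAIGPTTokenizer is
# not imported in this file; `model` is the LM-head checkpoint loaded as above):
# tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
# ids = tokenizer('the president is', return_tensors='pt').input_ids
# print(tokenizer.decode(model.generate(ids, do_sample=False)[0]))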
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
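    # Expected input format (added note): each line of --correct_filename
    # carries four ';'-separated fields matching the 4-way split above:
    # file path, class name, test name and the replacement source line.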
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 4 | 0 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __UpperCamelCase ( ) -> List[str]:
    _lowercase , _lowercase : Optional[int] = 9, 14 # noqa: F841
_lowercase : int = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_lowercase : int = defaultdict(_lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_lowercase : Optional[Any] = mst(_lowercase )
_lowercase : List[str] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_lowercase : Any = tuple(answer[:2] )
_lowercase : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
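# Shape of the structures above (added note): every weighted edge [u, v, w]
# is inserted in both directions, so adjancency[u] holds [v, w] and
# adjancency[v] holds [u, w]; the loop accepts either orientation of an
# expected edge because an undirected MST may report (u, v) or (v, u).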
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda x : -x[0] )
]
return result
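        # Illustrative result (added example with made-up labels): for
        # probabilities [0.09, 0.91] over candidate_labels ["dog", "cat"],
        # the comprehension above returns
        #   [{"score": 0.91, "label": "cat"}, {"score": 0.09, "label": "dog"}]
        # i.e. one dict per candidate label, sorted by descending score.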
| 4 | 0 |
'''simple docstring'''
_A : Dict ={
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
1_0: '''a''',
1_1: '''b''',
1_2: '''c''',
1_3: '''d''',
1_4: '''e''',
1_5: '''f''',
}
def __UpperCamelCase ( _lowercase ) -> str:
assert type(_lowercase ) in (int, float) and decimal == int(_lowercase )
_lowercase : int = int(_lowercase )
_lowercase : Dict = ''
_lowercase : Optional[int] = False
if decimal < 0:
_lowercase : Optional[int] = True
decimal *= -1
while decimal > 0:
        _lowercase , _lowercase : str = divmod(_lowercase, 16 )
_lowercase : List[str] = values[remainder] + hexadecimal
_lowercase : Optional[Any] = '0x' + hexadecimal
if negative:
_lowercase : Optional[Any] = '-' + hexadecimal
return hexadecimal
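# Worked example (added for illustration): converting 255 walks the loop twice:
#   divmod(255, 16) -> (15, 15), hexadecimal = "f"
#   divmod(15, 16)  -> (0, 15),  hexadecimal = "ff"
# so the prefix step yields "0xff". A negative input such as -42 runs the
# same loop on 42 and then prepends "-", giving "-0x2a".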
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
    # total count of single characters, used as the probability denominator.
    _lowercase : Union[str, Any] = sum(single_char_strings.values() )
    # running entropy sum over single characters
    _lowercase : Union[str, Any] = 0
    # for each alphabet character that occurs in the text, accumulate its entropy term
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # total count of two-character sequences, used as the denominator below.
    _lowercase : str = sum(two_char_strings.values() )
    _lowercase : str = 0
    # for each two-character sequence, accumulate its entropy term.
    for cha in my_alphas:
        for chb in my_alphas:
            _lowercase : Optional[Any] = cha + chb
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
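# Worked check of the first-order term (added example): for the text "aab",
# analyze_text counts single characters as {"a": 2, "b": 1}, so with
# all_sum = 3 the entropy is -(2/3 * log2(2/3) + 1/3 * log2(1/3)), about
# 0.918 bits, which the round() call above prints as 1.0.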
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # the first two-character sequence starts from the implicit leading space.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4 | 0 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> int:
if not isinstance(_lowercase, _lowercase ):
raise ValueError('Input must be an integer' )
if input_num <= 0:
raise ValueError('Input must be positive' )
return sum(
divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 )
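# Worked example (added for illustration): for input_num = 6 the candidate
# divisors range over 1..3 (range(1, 6 // 2 + 1)), all of which divide 6,
# so the function returns 1 + 2 + 3 = 6; since 6 equals the sum of its
# proper divisors, it is a perfect number.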
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCamelCase__ :
'''simple docstring'''
A_ = BlenderbotSmallConfig
A_ = {}
A_ = """gelu"""
def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Any=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : str=99 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Union[str, Any]=37 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Dict=20 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Any=1 , UpperCamelCase_ : Optional[int]=0 , ) -> str:
'''simple docstring'''
_lowercase : Tuple = parent
_lowercase : Optional[Any] = batch_size
_lowercase : Union[str, Any] = seq_length
_lowercase : Dict = is_training
_lowercase : List[str] = use_labels
_lowercase : int = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : Any = intermediate_size
_lowercase : str = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Optional[Any] = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : List[str] = pad_token_id
_lowercase : str = bos_token_id
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowercase : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowercase : int = prepare_blenderbot_small_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = TFBlenderbotSmallModel(config=UpperCamelCase_ ).get_decoder()
_lowercase : str = inputs_dict['input_ids']
_lowercase : Union[str, Any] = input_ids[:1, :]
_lowercase : List[str] = inputs_dict['attention_mask'][:1, :]
_lowercase : Optional[Any] = inputs_dict['head_mask']
_lowercase : Optional[int] = 1
# first forward pass
_lowercase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
        _lowercase , _lowercase : Tuple = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the sampled tokens to the running input_ids and attention_mask
_lowercase : str = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowercase : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowercase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
_lowercase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowercase : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowercase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
_lowercase : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
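        # Added note: assert_near with rtol=1e-3 tolerates the small numeric
        # drift between the cached and uncached decoding paths, which batch
        # their matmuls differently; bitwise equality is not expected here.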
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase=None, _lowercase=None, _lowercase=None, _lowercase=None, _lowercase=None, ) -> Tuple:
if attention_mask is None:
_lowercase : Any = tf.cast(tf.math.not_equal(_lowercase, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_lowercase : Optional[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_lowercase : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
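# Illustrative mask construction (added sketch mirroring the logic above):
# for decoder_input_ids = [[2, 5, 0]] with pad_token_id = 0, the first
# position is unconditionally set to 1 (the forced decoder start token)
# and the rest follow the not-equal-to-pad pattern, so the resulting
# decoder_attention_mask is [[1, 1, 0]].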
@require_tf
class lowerCamelCase__ ( A , A , unittest.TestCase ):
'''simple docstring'''
A_ = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
A_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
A_ = (
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ = True
A_ = False
A_ = False
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = TFBlenderbotSmallModelTester(self )
_lowercase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_tokenizers
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
A_ = [
"""Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
""" i'm going to throw up.\nand why is that?"""
]
A_ = """facebook/blenderbot_small-90M"""
@cached_property
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_lowercase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.tokenizer(self.src_text , return_tensors='tf' )
_lowercase : List[Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase_ , )
_lowercase : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_vision_model"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : str = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : Dict = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = attention_dropout
_lowercase : int = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : str = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
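# Minimal usage sketch (added; attribute values assume the defaults above):
#   config = InstructBlipConfig()        # sub-configs default-initialized
#   config.vision_config.hidden_size     # 1408 by default
#   config.num_query_tokens              # 32 by default
#   blob = config.to_dict()              # nested dicts, ready for JSON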
| 4 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=A ):
'''simple docstring'''
A_ = ["""flax""", """transformers"""]
def __init__( self : Optional[Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : str , *UpperCamelCase_ : Dict , **UpperCamelCase_ : List[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class lowerCamelCase__ ( metaclass=A ):
'''simple docstring'''
A_ = ["""flax""", """transformers"""]
def __init__( self : List[str] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : str ) -> str:
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class lowerCamelCase__ ( metaclass=A ):
'''simple docstring'''
A_ = ["""flax""", """transformers"""]
def __init__( self : Optional[Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : str , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class lowerCamelCase__ ( metaclass=A ):
'''simple docstring'''
A_ = ["""flax""", """transformers"""]
def __init__( self : Any , *UpperCamelCase_ : int , **UpperCamelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Optional[int] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        _lowercase : Optional[Any] = list(filter(lambda t : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
        _lowercase : List[Any] = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
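        # Why the ids above look shifted (added note): ByT5 reserves ids 0-2
        # for the pad, EOS and UNK tokens, so every UTF-8 byte b is encoded
        # as b + 3. For example ord("U") == 85 matches the leading id 88,
        # and the euro sign's three UTF-8 bytes (226, 130, 172) show up as
        # (229, 133, 175).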
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 0 |
'''simple docstring'''
import qiskit
def __UpperCamelCase ( _lowercase, _lowercase ) -> qiskit.result.counts.Counts:
_lowercase : Tuple = qiskit.Aer.get_backend('aer_simulator' )
_lowercase : Optional[Any] = qiskit.QuantumCircuit(4, 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0, 2 )
qc_ha.cx(1, 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0, 1, 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2, 0 ) # extract XOR value
qc_ha.measure(3, 1 ) # extract AND value
# Execute the circuit on the qasm simulator
_lowercase : Tuple = qiskit.execute(_lowercase, _lowercase, shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(_lowercase )
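# Classical cross-check (added sketch; _classical_half_adder is our own
# helper, not part of the original module). The circuit measures XOR into
# classical bit 0 and AND into bit 1, so a counts key reads "carry sum":
# 0+0 -> '00', 0+1 -> '01', 1+0 -> '01', 1+1 -> '10'.
def _classical_half_adder(bita: int, bitb: int) -> str:
    """Return the expected dominant counts key for the circuit above."""
    return f"{bita & bitb}{bita ^ bitb}"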
if __name__ == "__main__":
_A : Any =half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
| 714 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 0 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=99 , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : int=16 , UpperCamelCase_ : str=7 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Dict=32 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : str=30 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : int = parent
_lowercase : Tuple = batch_size
_lowercase : Dict = decoder_seq_length
# For common tests
_lowercase : Optional[Any] = self.decoder_seq_length
_lowercase : str = is_training
_lowercase : Optional[Any] = use_attention_mask
_lowercase : int = use_labels
_lowercase : Dict = vocab_size
_lowercase : str = d_model
_lowercase : Any = d_model
_lowercase : Dict = decoder_layers
_lowercase : Dict = decoder_layers
_lowercase : Optional[int] = decoder_ffn_dim
_lowercase : str = decoder_attention_heads
_lowercase : Union[str, Any] = decoder_attention_heads
_lowercase : str = eos_token_id
_lowercase : Dict = bos_token_id
_lowercase : Dict = pad_token_id
_lowercase : Tuple = decoder_start_token_id
_lowercase : Optional[Any] = use_cache
_lowercase : int = max_position_embeddings
_lowercase : List[str] = None
_lowercase : str = decoder_seq_length
_lowercase : List[str] = 2
_lowercase : str = 1
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_lowercase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowercase : Optional[Any] = None
if self.use_attention_mask:
_lowercase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowercase : Optional[Any] = None
if self.use_labels:
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowercase : Union[str, Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , ) -> Optional[int]:
'''simple docstring'''
_lowercase : List[str] = True
_lowercase : Union[str, Any] = TrOCRDecoder(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval()
_lowercase : int = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowercase : Optional[int] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
_lowercase : Optional[int] = model(UpperCamelCase_ )
_lowercase : Optional[int] = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) + 1 )
_lowercase : List[Any] = outputs['past_key_values']
# create hypothetical next token and extent to next_input_ids
_lowercase : int = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new tokens to the running input_ids
_lowercase : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowercase : int = model(UpperCamelCase_ )['last_hidden_state']
_lowercase : Dict = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ )['last_hidden_state']
# select random slice
_lowercase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowercase : Union[str, Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowercase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 )
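        # What the slice comparison above verifies (added summary): decoding
        # the full concatenated sequence from scratch and decoding only the
        # new tokens with past_key_values must agree on the final positions;
        # that agreement is the correctness contract of the KV cache.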
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_lowercase : List[Any] = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = config_and_inputs
_lowercase : int = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( A , A , A , unittest.TestCase ):
'''simple docstring'''
A_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A_ = (TrOCRForCausalLM,) if is_torch_available() else ()
A_ = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
A_ = True
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Tuple = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase_ )
_lowercase : int = ConfigTester(self , config_class=UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
pass
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
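# Equivalence check (added; _entropy_reference is our own helper, not part
# of the original file). With A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i),
# the Shannon entropy of the softmax p_i = exp(x_i) / A is
#   H = -sum_i p_i * log(p_i) = log(A) - B / A,
# which is exactly what the function above returns.
def _entropy_reference(x):
    """Direct softmax-entropy computation for comparison."""
    p = torch.softmax(x, dim=1)
    return -(p * torch.log(p)).sum(dim=1)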
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowercase : Optional[Any] = x
else:
_lowercase : Optional[int] = x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
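        # Control flow summary (added note): at inference time each layer's
        # highway head produces logits; when the entropy of those logits
        # falls below that layer's threshold, a HighwayException carrying
        # the highway output and the exit layer index is raised, and the
        # caller returns the early prediction instead of running the
        # remaining layers. During training the exception is never raised
        # and every highway exit is collected for the loss.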
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
    def init_highway_pooler(self):
        '''simple docstring'''
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        '''simple docstring'''
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        '''simple docstring'''
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        '''simple docstring'''
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None):
        '''simple docstring'''
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    '''simple docstring'''

    def __init__(self, message, exit_layer):
        '''simple docstring'''
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    '''simple docstring'''

    def __init__(self, config):
        '''simple docstring'''
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
    def forward(self, encoder_outputs):
        '''simple docstring'''
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """, BERT_START_DOCSTRING, )
class DeeBertForSequenceClassification(BertPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config):
        '''simple docstring'''
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        '''simple docstring'''
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate layer was confident enough to exit early.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
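

# --- Hedged illustration (not part of the original model): how the
# entropy-based early-exit rule above behaves. The module's own `entropy`
# helper is assumed to be defined earlier in this file; `softmax_entropy`
# below is an equivalent stand-in, and the 0.5 threshold is only an example
# value one might pass to `set_early_exit_entropy`.
def softmax_entropy(logits):
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1).mean()


if __name__ == "__main__":
    confident = torch.tensor([[8.0, -4.0]])  # near one-hot -> low entropy -> early exit
    uncertain = torch.tensor([[0.1, 0.0]])   # near uniform -> high entropy -> keep going
    assert softmax_entropy(confident) < 0.5 < softmax_entropy(uncertain)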
| 4 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'''.{module_name}''', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.')
        return {}
    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoFeatureExtractor:
'''simple docstring'''
def __init__( self : Dict ) -> List[Any]:
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}''')
    @staticmethod
    def register(config_class, feature_extractor_class):
        '''simple docstring'''
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
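

# --- Hedged usage sketch: resolve the right extractor class from a checkpoint
# via the mapping above. Requires network access to the Hub, hence the guard;
# the checkpoint name is just one whose model type appears in the mapping.
if __name__ == "__main__":
    extractor = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
    print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor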
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    '''simple docstring'''

    def test_sorted(self):
        '''calc_profit takes the items with the best profit/weight ratio first.'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        '''A negative capacity must be rejected.'''
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        '''A negative item weight must be rejected.'''
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        '''A negative profit must be rejected.'''
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        '''A zero capacity must be rejected.'''
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        '''profit and weight lists must have the same length.'''
        self.assertRaisesRegex(ValueError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
unittest.main()
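

# --- Hedged sketch of the greedy rule the tests above exercise. `calc_profit`
# is assumed to implement fractional greedy knapsack: take items in decreasing
# profit/weight ratio, splitting the last item if needed.
def _greedy_profit_sketch(profit, weight, max_weight):
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * (capacity / w)  # fractional part of the last item
            break
    return total


# All six items fit within capacity 100, so the answer is the full 210 profit.
assert _greedy_profit_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210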
| 4 | 0 |
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(pence: int = 200) -> int:
    return two_pound(pence)
if __name__ == "__main__":
print(solution(int(input().strip())))
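

# --- Hedged sketch: an equivalent bottom-up count for the same coin set, which
# avoids the deep chained recursion above. 73682 is the known result for 200p.
def count_ways(target: int = 200, coins=(1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


assert count_ways(200) == 73682  # matches solution(200)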
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
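

# --- Hedged miniature (standalone, guarded) of the lazy-import pattern above:
# a name is resolved to a real import only on first access. The toy mapping
# and helper names are illustrative, not part of the transformers API.
if __name__ == "__main__":
    import importlib

    structure = {'json': ['dumps']}  # toy mapping: module name -> exported names

    def lazy_get(name):
        for module_name, names in structure.items():
            if name in names:
                return getattr(importlib.import_module(module_name), name)
        raise AttributeError(name)

    assert lazy_get('dumps')({'ok': True}) == '{"ok": true}'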
| 4 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'instructblip_vision_model'

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'instructblip_qformer'

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'instructblip'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
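

# --- Hedged usage sketch: composing a full InstructBLIP config from its parts
# (the 'opt' text-config key is the documented default, see __init__ above).
if __name__ == "__main__":
    vision = InstructBlipVisionConfig()
    qformer = InstructBlipQFormerConfig()
    text = CONFIG_MAPPING['opt']()
    full = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
    print(full.num_query_tokens)  # 32 by default
    print(full.qformer_config.encoder_hidden_size)  # tied to the vision hidden size (1408)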
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'markuplm'

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
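

# --- Hedged usage sketch: the defaults mirror the base checkpoint; the XPath
# embedding sizes are the properties specific to MarkupLM.
if __name__ == "__main__":
    config = MarkupLMConfig()
    print(config.max_depth, config.max_xpath_tag_unit_embeddings, config.xpath_unit_hidden_size)  # 50 256 32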
| 4 | 0 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'''] = val[:dim]
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'''Unexpected key {key} in state_dict''')
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print('Looks ok!')
    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving image processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub(f'''caidas/{model_name}''')
        processor.push_to_hub(f'''caidas/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
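
    # --- Hedged usage note (example invocation; the script filename is an
    # assumption, the URL is one of the supported checkpoints in `url_to_name`):
    #
    #   python convert_swin2sr_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
    #       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64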
| 4 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act='gelu', type_sequence_label_size=10, initializer_range=0.02, out_features=['stage2', 'stage3', 'stage4'], num_labels=3, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config(self):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
A_ = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        return

    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason='UperNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason='UperNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_from_base(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_to_base(self):
        '''simple docstring'''
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        '''simple docstring'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        '''simple docstring'''
        pass
    def test_hidden_states_output(self):
        '''simple docstring'''

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
    @unittest.skip(reason='UperNet does not have tied weights')
    def test_tied_model_weights_key_ignore(self):
        '''simple docstring'''
        pass
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg')
    image = Image.open(filepath).convert('RGB')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    def test_inference_swin_backbone(self):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
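

# --- Hedged usage sketch mirroring the integration tests above (downloads the
# checkpoint from the Hub, so it is guarded):
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
    model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny')
    inputs = processor(images=prepare_img(), return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, num_labels, 512, 512)
    print(logits.shape)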
| 720 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(' '.join(line) + (remaining_spaces + 1) * ' ')
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
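

# --- Hedged check mirroring the function's expected behavior on the classic
# 16-column example (same data as the upstream doctest for this algorithm):
if __name__ == "__main__":
    assert text_justification('This is an example of text justification.', 16) == [
        'This    is    an',
        'example  of text',
        'justification.  ',
    ]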
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def __UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
_lowercase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Any = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
_lowercase : List[str] = TFViTModel(config=UpperCamelCase_ )
_lowercase : Optional[Any] = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
_lowercase : Union[str, Any] = self.image_size // 2
_lowercase : List[str] = pixel_values[:, :, :image_size, :image_size]
_lowercase : str = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ )
_lowercase : Dict = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Tuple = self.type_sequence_label_size
_lowercase : int = TFViTForImageClassification(UpperCamelCase_ )
_lowercase : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
_lowercase : List[Any] = self.image_size // 2
_lowercase : Any = pixel_values[:, :, :image_size, :image_size]
_lowercase : Optional[Any] = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowercase : Dict = 1
_lowercase : Union[str, Any] = TFViTForImageClassification(UpperCamelCase_ )
_lowercase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_lowercase : Optional[Any] = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase : int = config_and_inputs
_lowercase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( A , A , unittest.TestCase ):
'''simple docstring'''
A_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
A_ = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_lowercase : str = TFViTModelTester(self )
_lowercase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def __UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[int] = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowercase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[Any] = model_class(UpperCamelCase_ )
_lowercase : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Any = [*signature.parameters.keys()]
_lowercase : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(UpperCamelCase_ )
def __UpperCamelCase ( ) -> int:
_lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
_lowercase : Optional[Any] = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
_lowercase : List[Any] = self.default_image_processor
_lowercase : Dict = prepare_img()
_lowercase : int = image_processor(images=UpperCamelCase_ , return_tensors='tf' )
# forward pass
_lowercase : Optional[int] = model(**UpperCamelCase_ )
# verify the logits
_lowercase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
_lowercase : str = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
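
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original test file).
# How the sequence lengths asserted above are derived: a square image is cut
# into non-overlapping patch_size x patch_size patches, plus one [CLS] token.
def vit_seq_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token

assert vit_seq_length(30, 2) == 226  # the tester defaults above: 15 * 15 + 1
assert vit_seq_length(15, 2) == 50   # the halved image in the interpolation tests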
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
def __UpperCamelCase ( _lowercase ) -> List[str]:
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
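
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original script).
# What md_prefix yields at each directory depth: depth 0 opens a fresh "##"
# heading, deeper levels become "*" bullets indented two spaces per level.
for depth in range(3):
    prefix = f"{depth * '  '}*" if depth else "\n##"
    print(repr(prefix))  # '\n##', then '  *', then '    *'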
| 4 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A : Any =logging.get_logger(__name__)
_A : Optional[Any] ='''▁'''
_A : Optional[Any] ={'''vocab_file''': '''sentencepiece.bpe.model'''}
_A : Optional[Any] ={
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
_A : Any ={
'''facebook/mbart-large-50-one-to-many-mmt''': 1_0_2_4,
}
# fmt: off
_A : Optional[Any] =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = ["""input_ids""", """attention_mask"""]
A_ = []
A_ = []
def __init__( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : Union[str, Any]="</s>" , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : List[str]="<mask>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : int , ) -> None:
'''simple docstring'''
_lowercase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
_lowercase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowercase : List[str] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
_lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
_lowercase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_lowercase : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowercase : List[str] = 1
_lowercase : str = len(self.sp_model )
_lowercase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase_ )
}
_lowercase : Dict = {v: k for k, v in self.lang_code_to_id.items()}
_lowercase : Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowercase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowercase : Any = src_lang if src_lang is not None else 'en_XX'
_lowercase : str = self.lang_code_to_id[self._src_lang]
_lowercase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __UpperCAmelCase ( self : str ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : str ) -> None:
'''simple docstring'''
_lowercase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : str ) -> Dict:
'''simple docstring'''
_lowercase : int = self.__dict__.copy()
_lowercase : Tuple = None
return state
def __setstate__( self : List[str] , UpperCamelCase_ : Dict ) -> None:
'''simple docstring'''
_lowercase : Any = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowercase : Optional[int] = {}
_lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
_lowercase : str = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self : int , UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowercase : Dict = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : int ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : str ) -> Dict:
'''simple docstring'''
_lowercase : Tuple = []
_lowercase : Tuple = ''
_lowercase : Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
                # decode the sub-tokens accumulated so far, then append the special token
                out_string += self.sp_model.decode(current_sub_tokens ) + token
_lowercase : str = True
_lowercase : Tuple = []
else:
current_sub_tokens.append(UpperCamelCase_ )
_lowercase : Any = False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowercase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , 'wb' ) as fi:
_lowercase : int = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
_lowercase : Tuple = [1] * len(self.prefix_tokens )
_lowercase : List[str] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase_ )) + ([0] * len(UpperCamelCase_ )) + suffix_ones
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] , UpperCamelCase_ : Optional[str] , **UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowercase : Optional[Any] = src_lang
_lowercase : Optional[Any] = self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = self.convert_tokens_to_ids(UpperCamelCase_ )
_lowercase : int = tgt_lang_id
return inputs
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str = "en_XX" , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "ro_RO" , **UpperCamelCase_ : Dict , ) -> BatchEncoding:
'''simple docstring'''
_lowercase : Any = src_lang
_lowercase : int = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : str ) -> None:
'''simple docstring'''
_lowercase : List[Any] = self.lang_code_to_id[src_lang]
_lowercase : Dict = [self.cur_lang_code_id]
_lowercase : Optional[int] = [self.eos_token_id]
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str ) -> None:
'''simple docstring'''
_lowercase : List[Any] = self.lang_code_to_id[tgt_lang]
_lowercase : Union[str, Any] = [self.cur_lang_code_id]
_lowercase : Any = [self.eos_token_id]
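
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original tokenizer).
# The fairseq/sentencepiece alignment described in the comment table above,
# in miniature. The toy spm vocabulary and the shortened language-code list
# are assumptions chosen purely for illustration.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
spm_pieces = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}  # toy spm model
lang_codes = ["ar_AR", "cs_CZ", "en_XX"]
lang_code_to_id = {
    code: len(spm_pieces) + i + fairseq_offset for i, code in enumerate(lang_codes)
}

def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    if token in lang_code_to_id:
        return lang_code_to_id[token]
    spm_id = spm_pieces.get(token, 0)
    # spm id 0 is <unk>, so fall back to the fairseq <unk> id in that case
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert token_to_id(",") == 4           # spm id 3 shifted by the offset of 1
assert token_to_id("en_XX") == 8       # appended after the 5 spm pieces
assert token_to_id("never-seen") == 3  # unknown pieces map to <unk>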
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str =[
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
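
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original file).
# The idea behind the _LazyModule pattern above, reduced to plain importlib:
# a submodule is only imported when one of its exported names is first
# accessed. This is a simplified stand-in, not transformers' implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value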
| 4 | 0 |
'''simple docstring'''
from collections import defaultdict
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
_lowercase : List[str] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_lowercase : int = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
_lowercase : Optional[int] = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_lowercase : Union[str, Any] = (1 << len(UpperCamelCase_ )) - 1
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # number of ways when we don't assign this task to anyone in the arrangement
_lowercase : List[str] = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
_lowercase : Dict = total_ways_util
return self.dp[mask][task_no]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict ) -> List[Any]:
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
_A : Dict =5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_A : List[str] =[[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
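
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original entry).
# A clean, runnable version of the bitmask DP above: count the ways to give
# each person exactly one distinct task. Names are descriptive stand-ins for
# the obfuscated identifiers.
from functools import lru_cache

def count_assignments(task_performed: list[list[int]], total_tasks: int) -> int:
    num_persons = len(task_performed)
    final_mask = (1 << num_persons) - 1
    candidates: dict = {}  # task number -> persons able to perform it
    for person, tasks in enumerate(task_performed):
        for task in tasks:
            candidates.setdefault(task, []).append(person)

    @lru_cache(maxsize=None)
    def ways(mask: int, task_no: int) -> int:
        if mask == final_mask:       # every person already has a task
            return 1
        if task_no > total_tasks:    # tasks exhausted, someone is still idle
            return 0
        total = ways(mask, task_no + 1)  # ways that skip this task entirely
        for person in candidates.get(task_no, []):
            if not mask & (1 << person):
                total += ways(mask | (1 << person), task_no + 1)
        return total

    return ways(0, 1)

# matches the example printed by the entry above
assert count_assignments([[1, 3, 4], [1, 2, 5], [3, 4]], 5) == 10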
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ) -> Tuple:
'''simple docstring'''
_lowercase : int = parent
_lowercase : str = batch_size
_lowercase : List[str] = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_attention_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Dict = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Any = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Any = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : str = num_choices
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : int = None
if self.use_attention_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Any = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = config_and_inputs
_lowercase : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = True
A_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
_lowercase : Tuple = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowercase : Optional[int] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase_ )
_lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_lowercase : Dict = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase : int = model(UpperCamelCase_ )[0]
_lowercase : Union[str, Any] = 5_0000
_lowercase : str = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase_ )
_lowercase : int = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 4 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __UpperCamelCase ( _lowercase ) -> Optional[Any]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase_ : nn.Module , UpperCamelCase_ : int ) -> int:
'''simple docstring'''
super().__init__()
_lowercase : Dict = module
_lowercase : Optional[Any] = nn.Sequential(
nn.Linear(module.in_features , UpperCamelCase_ , bias=UpperCamelCase_ ) , nn.Linear(UpperCamelCase_ , module.out_features , bias=UpperCamelCase_ ) , )
_lowercase : Union[str, Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.module(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) + self.adapter(UpperCamelCase_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
A_ = """bigscience/bloom-1b7"""
# Constant values
A_ = 2.1_09_65_95_52_69_25_74
A_ = """Hello my name is"""
A_ = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
A_ = 10
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
_lowercase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
_lowercase : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = self.model_abit.config
self.assertTrue(hasattr(UpperCamelCase_ , 'quantization_config' ) )
_lowercase : int = config.to_dict()
_lowercase : Union[str, Any] = config.to_diff_dict()
_lowercase : Optional[Any] = config.to_json_string()
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
_lowercase : Any = self.model_fpaa.get_memory_footprint()
_lowercase : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
_lowercase : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(UpperCamelCase_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_lowercase : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
_lowercase : List[Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowercase : Tuple = BitsAndBytesConfig()
_lowercase : Optional[int] = True
_lowercase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCamelCase_ , device_map='auto' )
_lowercase : str = self.tokenizer(self.input_text , return_tensors='pt' )
_lowercase : Tuple = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
with self.assertRaises(UpperCamelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(UpperCamelCase_ )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : int = BitsAndBytesConfig()
with self.assertRaises(UpperCamelCase_ ):
_lowercase : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=UpperCamelCase_ , load_in_abit=UpperCamelCase_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaises(UpperCamelCase_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(UpperCamelCase_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(UpperCamelCase_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(UpperCamelCase_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(UpperCamelCase_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
_lowercase : int = self.tokenizer(self.input_text , return_tensors='pt' )
_lowercase : Union[str, Any] = self.model_fpaa.to(torch.floataa )
_lowercase : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
_lowercase : Tuple = self.model_fpaa.to('cpu' )
# Check this does not throw an error
_lowercase : Dict = self.model_fpaa.half()
# Check this does not throw an error
_lowercase : Tuple = self.model_fpaa.float()
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
_lowercase : Tuple = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=UpperCamelCase_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls : Tuple ) -> int:
'''simple docstring'''
_lowercase : Optional[int] = 't5-small'
_lowercase : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
_lowercase : Optional[int] = AutoTokenizer.from_pretrained(cls.model_name )
_lowercase : Union[str, Any] = 'Translate in German: Hello, my dog is cute'
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
from transformers import TaForConditionalGeneration
_lowercase : Union[str, Any] = TaForConditionalGeneration._keep_in_fpaa_modules
_lowercase : Optional[Any] = None
# test with `t5-small`
_lowercase : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
_lowercase : Any = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
_lowercase : Optional[Any] = model.generate(**UpperCamelCase_ )
# test with `flan-t5-small`
_lowercase : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
_lowercase : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
_lowercase : int = model.generate(**UpperCamelCase_ )
_lowercase : Optional[Any] = modules
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_lowercase : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
_lowercase : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
_lowercase : List[Any] = model.generate(**UpperCamelCase_ )
# test with `flan-t5-small`
_lowercase : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
_lowercase : Any = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
_lowercase : List[Any] = model.generate(**UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
super().setUp()
# model_name
_lowercase : str = 'bigscience/bloom-560m'
_lowercase : str = 't5-small'
# Different types of model
_lowercase : Tuple = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
# Sequence classification model
_lowercase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
# CausalLM model
_lowercase : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
# Seq2seq model
_lowercase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=UpperCamelCase_ , device_map='auto' )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
_lowercase : str = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_lowercase : int = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=UpperCamelCase_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
_lowercase : int = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
_lowercase : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = 'facebook/opt-350m'
super().setUp()
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
_lowercase : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
_lowercase : List[Any] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_lowercase : int = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(UpperCamelCase_ ) ):
_lowercase : Optional[Any] = LoRALayer(module.q_proj , rank=16 )
_lowercase : Optional[int] = LoRALayer(module.k_proj , rank=16 )
_lowercase : Tuple = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
_lowercase : Dict = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_lowercase : List[str] = model.forward(**UpperCamelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(UpperCamelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """gpt2-xl"""
A_ = 3.31_91_85_48_54_15_21_87
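
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original test file).
# The low-rank adapter idea exercised by the training test above, in
# isolation: a frozen base linear layer plus a trainable rank-r bottleneck,
# y = W x + B(A x). Dimensions and initialisation are assumptions.
import torch
import torch.nn as nn

class TinyLoRALinear(nn.Module):
    def __init__(self, base: nn.Linear, rank: int = 16):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad = False  # the pretrained weight stays frozen
        self.down = nn.Linear(base.in_features, rank, bias=False)
        self.up = nn.Linear(rank, base.out_features, bias=False)
        nn.init.normal_(self.down.weight, std=1.0 / rank)
        nn.init.zeros_(self.up.weight)  # the adapter starts as a zero delta

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.base(x) + self.up(self.down(x))

layer = TinyLoRALinear(nn.Linear(32, 32), rank=4)
assert layer(torch.randn(2, 32)).shape == (2, 32)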
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
                _lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
        # "repeat" is only used as a new possible value for padding: the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
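
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original feature
# extractor). The "repeatpad" branch above on a toy waveform: tile the short
# audio, then zero-pad whatever is still missing up to max_length.
import numpy as np

def repeatpad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant")

out = repeatpad(np.array([1.0, 2.0, 3.0]), 8)
assert out.tolist() == [1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 0.0, 0.0]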
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
_A : Tuple =1.0_54_57_18_17e-34 # unit of ℏ : J * s
_A : str =3e8 # unit of c : m * s^-1
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
_lowercase : Dict = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
_lowercase : Optional[Any] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
_lowercase : int = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
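
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original entry).
# A worked numeric check of the formula above, F = (hbar * c * pi^2 * A) /
# (240 * d^4), for two 4 cm^2 plates 1 micrometre apart. The plate geometry
# is an assumption chosen for illustration.
from math import pi

hbar = 1.054571817e-34  # J * s
c = 3e8                 # m / s
area = 4e-4             # m^2
distance = 1e-6         # m

force = (hbar * c * pi**2 * area) / (240 * distance**4)
print(f"{force:.3e} N")  # roughly 5.2e-7 N of attraction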
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def __UpperCamelCase ( _lowercase ) -> dict:
_lowercase : Optional[int] = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_lowercase ).json()
def __UpperCamelCase ( _lowercase = 10 ) -> list[dict]:
_lowercase : Union[str, Any] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
_lowercase : Optional[Any] = requests.get(_lowercase ).json()[:max_stories]
return [get_hackernews_story(_lowercase ) for story_id in story_ids]
def __UpperCamelCase ( _lowercase = 10 ) -> str:
_lowercase : Tuple = hackernews_top_stories(_lowercase )
return "\n".join('* [{title}]({url})'.format(**_lowercase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
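
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original script).
# The same flow with readable names against the public Hacker News Firebase
# API used above; the timeout and the url fallback (Ask HN items carry no
# url field) are defensive additions.
import requests

def top_stories_markdown(max_stories: int = 5) -> str:
    base = "https://hacker-news.firebaseio.com/v0"
    ids = requests.get(f"{base}/topstories.json", timeout=10).json()[:max_stories]
    stories = (requests.get(f"{base}/item/{i}.json", timeout=10).json() for i in ids)
    return "\n".join(f"* [{s['title']}]({s.get('url', '')})" for s in stories)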
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Union[str, Any] ={
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =[
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
| 4 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
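# Zero-shot image classification: an image is scored against free-form candidate
# labels via image-text logits (logits_per_image) from a CLIP-style model.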
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
]
return result
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
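# "Bertarizes" a movement-pruned model: the learned mask scores are binarized and
# folded into the dense weights, producing a standard checkpoint.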
def __UpperCamelCase ( _lowercase ) -> List[Any]:
_lowercase : Tuple = args.pruning_method
_lowercase : int = args.threshold
_lowercase : str = args.model_name_or_path.rstrip('/' )
_lowercase : Dict = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
_lowercase : List[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Optional[int] = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : List[str] = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Dict = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
_lowercase : Optional[Any] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
_lowercase : str = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
_lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
_lowercase : Optional[int] = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[int] = name[:-6]
_lowercase : List[str] = model[f'''{prefix_}mask_scores''']
_lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
_lowercase : str = torch.sigmoid(_lowercase )
_lowercase : int = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
_lowercase : Union[str, Any] = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : List[Any] = os.path.join(
os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
if not os.path.isdir(_lowercase ):
shutil.copytree(_lowercase, _lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
| 4 | 0 |
'''simple docstring'''
import math
import os
import sys
def __UpperCamelCase ( _lowercase ) -> str:
_lowercase : Dict = ''
try:
with open(_lowercase, 'rb' ) as binary_file:
_lowercase : List[Any] = binary_file.read()
for dat in data:
_lowercase : Optional[Any] = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
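# Lexicon maintenance for LZW: the matched key is replaced by its two extensions,
# and every code gains a leading bit once the index reaches a power of two.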
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase ) -> None:
lexicon.pop(_lowercase )
_lowercase : Dict = last_match_id
if math.loga(_lowercase ).is_integer():
for curr_key in lexicon:
_lowercase : Optional[int] = '0' + lexicon[curr_key]
_lowercase : Union[str, Any] = bin(_lowercase )[2:]
def __UpperCamelCase ( _lowercase ) -> str:
_lowercase : Tuple = {'0': '0', '1': '1'}
    _lowercase , _lowercase : Any = '', ''
_lowercase : Dict = len(_lowercase )
for i in range(len(_lowercase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_lowercase : List[Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_lowercase, _lowercase, _lowercase, _lowercase )
index += 1
_lowercase : Tuple = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
_lowercase : Any = lexicon[curr_string]
result += last_match_id
return result
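# The header added below encodes the original file size so decompression knows
# where the payload ends: (n-1) zero bits, the size as an n-bit binary number, then the data.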
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Union[str, Any] = os.path.getsize(_lowercase )
_lowercase : Optional[Any] = bin(_lowercase )[2:]
_lowercase : str = len(_lowercase )
return "0" * (length_length - 1) + file_length_binary + compressed
def __UpperCamelCase ( _lowercase, _lowercase ) -> None:
_lowercase : Dict = 8
try:
with open(_lowercase, 'wb' ) as opened_file:
_lowercase : str = [
to_write[i : i + byte_length]
for i in range(0, len(_lowercase ), _lowercase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_lowercase, 2 ).to_bytes(1, byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def __UpperCamelCase ( _lowercase, _lowercase ) -> None:
_lowercase : Dict = read_file_binary(_lowercase )
_lowercase : Optional[Any] = compress_data(_lowercase )
_lowercase : int = add_file_length(_lowercase, _lowercase )
write_file_binary(_lowercase, _lowercase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
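# The standard Base64 alphabet: each 6-bit group (0-63) indexes one of these characters.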
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
        # Pad binary_stream with filler binary digits (0's by default) to make its
        # length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
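# The ONNX export helpers are exposed lazily; real imports only happen under
# TYPE_CHECKING or on first attribute access via _LazyModule.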
_A : Union[str, Any] ={
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
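# Lychrel candidates (cf. Project Euler 55): numbers below the limit that never
# reach a palindrome within 50 reverse-and-add iterations.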
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
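# These tests verify that patch_submodule intercepts every alias of its target
# (attribute access, bare names, renamed imports) and restores them afterwards.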
def __UpperCamelCase ( ) -> List[str]:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_lowercase : Optional[int] = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching, 'os.path.join', _lowercase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __UpperCamelCase ( ) -> str:
assert _test_patching.open is open
_lowercase : Any = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, 'open', _lowercase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def __UpperCamelCase ( ) -> int:
# pandas.read_csv is not present in _test_patching
_lowercase : Tuple = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching, 'pandas.read_csv', _lowercase ):
pass
def __UpperCamelCase ( ) -> Any:
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at one point
_lowercase : Optional[Any] = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, 'len', _lowercase ) is None
with patch_submodule(_test_patching, 'len', _lowercase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __UpperCamelCase ( ) -> Any:
_lowercase : Union[str, Any] = '__test_patch_submodule_start_and_stop_mock__'
_lowercase : List[str] = patch_submodule(_test_patching, 'open', _lowercase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __UpperCamelCase ( ) -> str:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_lowercase : Tuple = '__test_patch_submodule_successive_join__'
_lowercase : List[str] = '__test_patch_submodule_successive_dirname__'
_lowercase : Union[str, Any] = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, 'os.path.join', _lowercase ):
with patch_submodule(_test_patching, 'os.rename', _lowercase ):
with patch_submodule(_test_patching, 'os.path.dirname', _lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, 'os.rename', _lowercase ):
with patch_submodule(_test_patching, 'os.path.join', _lowercase ):
with patch_submodule(_test_patching, 'os.path.dirname', _lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __UpperCamelCase ( ) -> Any:
_lowercase : Union[str, Any] = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching, '__module_that_doesn_exist__.__attribute_that_doesn_exist__', _lowercase ):
pass
with patch_submodule(_test_patching, 'os.__attribute_that_doesn_exist__', _lowercase ):
pass
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
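# Rewrites the expected-value lines of failing tests in place, locating each target
# by (file, class, test) and matching on the first token of the correct line.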
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 4 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_A : Dict =logging.getLogger(__name__)
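# Toy linear-regression data and model used below to exercise Accelerator checkpointing.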
def __UpperCamelCase ( _lowercase=2, _lowercase=3, _lowercase=16, _lowercase = 10, _lowercase = 2 ) -> Tuple:
def get_dataset(_lowercase ):
_lowercase : int = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(_lowercase, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
_lowercase : Dict = get_dataset(_lowercase )
_lowercase : Dict = get_dataset(_lowercase )
_lowercase : str = DataLoader(_lowercase, shuffle=_lowercase, batch_size=_lowercase, num_workers=4 )
_lowercase : Any = DataLoader(_lowercase, shuffle=_lowercase, batch_size=_lowercase, num_workers=4 )
return (train_dataloader, valid_dataloader)
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase=None ) -> Dict:
_lowercase : Any = []
for epoch in range(_lowercase ):
# Train quickly
model.train()
for batch in dataloader:
_lowercase : List[Any] = batch
_lowercase : Dict = model(_lowercase )
_lowercase : Any = torch.nn.functional.mse_loss(_lowercase, _lowercase )
accelerator.backward(_lowercase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ) -> List[str]:
'''simple docstring'''
super().__init__()
_lowercase : Any = nn.Parameter(torch.randn(1 ) )
_lowercase : Optional[Any] = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
return x * self.a + self.b
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowercase : Union[str, Any] = DummyModel()
_lowercase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowercase : Union[str, Any] = dummy_dataloaders()
_lowercase : List[Any] = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase_ , automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
_lowercase : List[Any] = Accelerator(project_config=UpperCamelCase_ )
_lowercase : Union[str, Any] = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowercase : Any = DummyModel()
_lowercase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowercase : Dict = dummy_dataloaders()
# Train baseline
_lowercase : List[Any] = Accelerator()
_lowercase : Dict = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
_lowercase : Any = os.path.join(UpperCamelCase_ , 'initial' )
accelerator.save_state(UpperCamelCase_ )
(_lowercase) : Union[str, Any] = model.a.item(), model.b.item()
_lowercase : Optional[Any] = optimizer.state_dict()
_lowercase : int = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
(_lowercase) : str = model.a.item(), model.b.item()
_lowercase : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
_lowercase : int = DummyModel()
_lowercase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowercase : Any = dummy_dataloaders()
_lowercase : List[str] = Accelerator()
_lowercase : List[str] = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.load_state(UpperCamelCase_ )
(_lowercase) : List[Any] = model.a.item(), model.b.item()
_lowercase : int = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : str = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save everything
_lowercase : Tuple = os.path.join(UpperCamelCase_ , 'checkpoint' )
accelerator.save_state(UpperCamelCase_ )
# Load everything back in and make sure all states work
accelerator.load_state(UpperCamelCase_ )
test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
(_lowercase) : str = model.a.item(), model.b.item()
_lowercase : int = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowercase : Optional[Any] = DummyModel()
_lowercase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowercase : Dict = dummy_dataloaders()
_lowercase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
_lowercase : Optional[Any] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
_lowercase : int = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
(_lowercase) : int = model.a.item(), model.b.item()
_lowercase : Dict = optimizer.state_dict()
_lowercase : Optional[int] = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
(_lowercase) : Optional[int] = model.a.item(), model.b.item()
_lowercase : List[str] = optimizer.state_dict()
# Train partially
set_seed(42 )
_lowercase : Tuple = DummyModel()
_lowercase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowercase : List[Any] = dummy_dataloaders()
_lowercase : Optional[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase_ )
_lowercase : Optional[int] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
_lowercase : Dict = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.load_state(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_0' ) )
(_lowercase) : str = model.a.item(), model.b.item()
_lowercase : Tuple = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
(_lowercase) : Any = model.a.item(), model.b.item()
_lowercase : List[Any] = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
_lowercase : str = torch.tensor([1, 2, 3] )
_lowercase : Union[str, Any] = torch.tensor([2, 3, 4] )
_lowercase : Union[str, Any] = DummyModel()
_lowercase : Union[str, Any] = torch.optim.Adam(net.parameters() )
_lowercase : int = Accelerator()
with self.assertRaises(UpperCamelCase_ ) as ve:
accelerator.register_for_checkpointing(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowercase : Dict = DummyModel()
_lowercase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowercase : List[str] = torch.optim.lr_scheduler.StepLR(UpperCamelCase_ , step_size=1 , gamma=0.99 )
_lowercase : str = dummy_dataloaders()
_lowercase : Any = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
_lowercase : List[str] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
_lowercase : List[str] = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
_lowercase : str = scheduler.state_dict()
train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(UpperCamelCase_ , scheduler.state_dict() )
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowercase : Optional[Any] = DummyModel()
_lowercase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ , total_limit=2 )
# Train baseline
_lowercase : List[str] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
_lowercase : List[Any] = accelerator.prepare(UpperCamelCase_ )
            # Save 11 states; with total_limit=2 only the two most recent checkpoints survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
_lowercase : List[str] = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
_A : Union[str, Any] ='''/tmp/accelerate/state_checkpointing'''
_A : Any =DummyModel()
_A : int =torch.optim.Adam(params=model.parameters(), lr=1e-3)
_A : str =torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_A : int =dummy_dataloaders()
_A : Dict =ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_A : Optional[Any] =Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_A : Optional[Any] =accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_A : Any =accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_A : List[Any] =group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
_A : Optional[Any] =model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
_A : Tuple =group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
_A : Optional[int] =group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = {}
if "candidate_labels" in kwargs:
_lowercase : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_lowercase : int = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Dict = load_image(UpperCamelCase_ )
_lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
_lowercase : Optional[Any] = candidate_labels
_lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
_lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
_lowercase : Any = [text_inputs]
return inputs
def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
_lowercase : List[str] = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
_lowercase : Optional[int] = text_inputs[0]
else:
# Batching case.
_lowercase : List[str] = text_inputs[0][0]
_lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
_lowercase : Optional[int] = model_outputs['logits'][0]
if self.framework == "pt":
_lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
_lowercase : Tuple = probs.tolist()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : List[Any] = [scores]
elif self.framework == "tf":
_lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
_lowercase : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowercase : List[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
]
return result
| 4 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
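# Smoke and slow tests for the score-based generative pipeline (ScoreSdeVe).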
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = self.dummy_uncond_unet
_lowercase : List[str] = ScoreSdeVeScheduler()
_lowercase : Optional[int] = ScoreSdeVePipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
sde_ve.to(UpperCamelCase_ )
sde_ve.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : Any = torch.manual_seed(0 )
_lowercase : List[str] = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=UpperCamelCase_ ).images
_lowercase : str = torch.manual_seed(0 )
_lowercase : Union[str, Any] = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=UpperCamelCase_ , return_dict=UpperCamelCase_ )[
0
]
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
_lowercase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : Union[str, Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = 'google/ncsnpp-church-256'
_lowercase : int = UNetaDModel.from_pretrained(UpperCamelCase_ )
_lowercase : List[Any] = ScoreSdeVeScheduler.from_pretrained(UpperCamelCase_ )
_lowercase : Optional[Any] = ScoreSdeVePipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
sde_ve.to(UpperCamelCase_ )
sde_ve.set_progress_bar_config(disable=UpperCamelCase_ )
_lowercase : int = torch.manual_seed(0 )
_lowercase : Any = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=UpperCamelCase_ ).images
_lowercase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase : List[str] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
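# Compares first-order (single character) and second-order (character pair)
# Shannon entropy of a text over the alphabet ' ' + a-z.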
def __UpperCamelCase ( _lowercase ) -> None:
_lowercase , _lowercase : List[Any] = analyze_text(_lowercase )
_lowercase : Any = list(' ' + ascii_lowercase )
    # total count of single characters, used to normalize counts into probabilities.
_lowercase : Union[str, Any] = sum(single_char_strings.values() )
    # entropy of single (length-one) strings
_lowercase : Union[str, Any] = 0
    # for each alphabet character present in the counts, accumulate its entropy term
for ch in my_alphas:
if ch in single_char_strings:
_lowercase : Any = single_char_strings[ch]
_lowercase : int = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # entropy of two-character strings
_lowercase : str = sum(two_char_strings.values() )
_lowercase : str = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_lowercase : Optional[Any] = cha + cha
if sequence in two_char_strings:
_lowercase : int = two_char_strings[sequence]
_lowercase : Optional[int] = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
_lowercase : Optional[Any] = Counter() # type: ignore
_lowercase : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # first case: treat the text as if it were preceded by a space.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4 | 0 |
'''simple docstring'''
from collections import deque
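# Multi-level feedback queue scheduler: round robin with per-level time slices on
# the upper queues, first-come-first-served on the lowest.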
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> None:
'''simple docstring'''
_lowercase : Dict = process_name # process name
_lowercase : Dict = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
_lowercase : Optional[Any] = arrival_time
_lowercase : int = burst_time # remaining burst time
_lowercase : Optional[Any] = 0 # total time of the process wait in ready queue
_lowercase : Dict = 0 # time from arrival time to completion time
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : list[int] , UpperCamelCase_ : deque[Process] , UpperCamelCase_ : int , ) -> None:
'''simple docstring'''
_lowercase : List[str] = number_of_queues
# time slice of queues that round robin algorithm applied
_lowercase : Optional[Any] = time_slices
# unfinished process is in this ready_queue
_lowercase : List[str] = queue
# current time
_lowercase : List[Any] = current_time
# finished process is in this sequence queue
_lowercase : deque[Process] = deque()
def __UpperCAmelCase ( self : List[str] ) -> list[str]:
'''simple docstring'''
_lowercase : Tuple = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __UpperCAmelCase ( self : Any , UpperCamelCase_ : list[Process] ) -> list[int]:
'''simple docstring'''
_lowercase : Optional[int] = []
for i in range(len(UpperCamelCase_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : list[Process] ) -> list[int]:
'''simple docstring'''
_lowercase : Optional[Any] = []
for i in range(len(UpperCamelCase_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : list[Process] ) -> list[int]:
'''simple docstring'''
_lowercase : Optional[Any] = []
for i in range(len(UpperCamelCase_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : deque[Process] ) -> list[int]:
'''simple docstring'''
return [q.burst_time for q in queue]
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Process ) -> int:
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : deque[Process] ) -> deque[Process]:
'''simple docstring'''
_lowercase : deque[Process] = deque() # sequence deque of finished process
while len(UpperCamelCase_ ) != 0:
_lowercase : Dict = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(UpperCamelCase_ )
# update current time
self.current_time += cp.burst_time
            # finish the process and set its remaining burst time to 0
_lowercase : Any = 0
# set the process's turnaround time because it is finished
_lowercase : Optional[Any] = self.current_time - cp.arrival_time
# set the completion time
_lowercase : Union[str, Any] = self.current_time
            # add the process to the local queue of finished processes
finished.append(UpperCamelCase_ )
self.finish_queue.extend(UpperCamelCase_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : deque[Process] , UpperCamelCase_ : int ) -> tuple[deque[Process], deque[Process]]:
'''simple docstring'''
_lowercase : deque[Process] = deque() # sequence deque of terminated process
        # run a single round-robin cycle; unfinished processes go back to the queue
for _ in range(len(UpperCamelCase_ ) ):
_lowercase : Any = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(UpperCamelCase_ )
            # if the process's burst time exceeds the time slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
_lowercase : List[str] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(UpperCamelCase_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
_lowercase : Dict = 0
# set the finish time
_lowercase : Union[str, Any] = self.current_time
# update the process' turnaround time because it is finished
_lowercase : str = self.current_time - cp.arrival_time
                # add the process to the local queue of finished processes
finished.append(UpperCamelCase_ )
self.finish_queue.extend(UpperCamelCase_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __UpperCAmelCase ( self : Optional[int] ) -> deque[Process]:
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
_lowercase : List[Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_A : Any = Process('''P1''', 0, 5_3)
_A : List[Any] = Process('''P2''', 0, 1_7)
_A : Dict = Process('''P3''', 0, 6_8)
_A : str = Process('''P4''', 0, 2_4)
_A : List[str] = 3
_A : Optional[Any] = [1_7, 2_5]
_A : Any = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
_A : List[Any] = Process('''P1''', 0, 5_3)
_A : Any = Process('''P2''', 0, 1_7)
_A : int = Process('''P3''', 0, 6_8)
_A : Any = Process('''P4''', 0, 2_4)
_A : List[Any] = 3
_A : List[Any] = [1_7, 2_5]
_A : List[str] = deque([Pa, Pa, Pa, Pa])
_A : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_A : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
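# Integration test: a slice of xlm-roberta-base's last hidden state is compared
# against precomputed reference values.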
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_lowercase : str = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_lowercase : List[Any] = 'The dog is cute and lives in the garden house'
_lowercase : Optional[int] = jnp.array([tokenizer.encode(UpperCamelCase_ )] )
_lowercase : int = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_lowercase : Tuple = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_lowercase : List[str] = model(UpperCamelCase_ )['last_hidden_state']
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1E-3 ) )
| 4 | 0 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_A : Optional[Any] =[
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
_A : List[str] =[
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
_A : int =(
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
_A : Any =(
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
_A : Optional[Any] =[
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
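# TF variable names are translated to HF parameter names by applying the
# (tf_name, hf_name) substitution patterns above in order.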
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
for tf_name, hf_name in patterns:
_lowercase : List[str] = k.replace(_lowercase, _lowercase )
return k
def convert_bigbird_pegasus( tf_weights, config_update ) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
    for k, v in tqdm(decoder_weights.items(), 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns )
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    for k, v in tqdm(remaining_weights.items(), 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    # the shared TF position embeddings are copied to both the encoder and the decoder
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight' )
    missing, extra = torch_model.load_state_dict(mapping, strict=False )
    unexpected_missing = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def get_tf_weights_as_numpy( path ) -> Dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars, desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path, name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path, save_dir, config_update ) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights, config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
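# Illustrative invocation (paths are placeholders, not taken from this repository):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird_pegasus_tf_ckpt --save_dir ./bigbird_pegasus_pt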
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """instructblip_vision_model"""
    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """instructblip_qformer"""
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """instructblip"""
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs( cls , vision_config : InstructBlipVisionConfig , qformer_config : InstructBlipQFormerConfig , text_config : PretrainedConfig , **kwargs , ) -> "InstructBlipConfig":
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ) -> dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
return output
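# Minimal sketch (not in the original file; assumes default sub-configs stand in for a real
# checkpoint's values):
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING['opt']()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)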
| 4 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP ={
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES ={
'''unc-nlp/lxmert-base-uncased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION ={
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> list:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
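# Illustrative example (hypothetical token ids): for token_ids_a=[5, 6] and token_ids_b=[7],
# build_inputs_with_special_tokens returns [CLS] 5 6 [SEP] 7 [SEP], and
# create_token_type_ids_from_sequences returns [0, 0, 0, 0, 1, 1].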
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK ='''pt'''
elif is_tf_available():
    FRAMEWORK ='''tf'''
else:
    FRAMEWORK ='''jax'''
class ByT5TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> None:
'''simple docstring'''
super().setUp()
        tokenizer = ByT5Tokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def ta_base_tokenizer( self ) -> ByT5Tokenizer:
        '''simple docstring'''
        return ByT5Tokenizer.from_pretrained('google/byt5-small' )
    def get_tokenizer( self , **kwargs ) -> ByT5Tokenizer:
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 0 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger =datasets.logging.get_logger(__name__)
_CITATION ='''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION ='''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION ='''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS ={
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
    def _download_and_prepare( self , dl_manager ) -> None:
        '''simple docstring'''
        if self.config_name == "default":
            logger.warning(
                'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
                'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
            checkpoint_name = 'bleurt-base-128'
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ) -> dict:
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 714 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 0 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid( _outputs ) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax( _outputs ) -> np.ndarray:
    maxes = np.max(_outputs, axis=-1, keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True )
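# Sanity note (illustrative, not in the original file): subtracting the row-wise max makes
# softmax overflow-safe without changing the result, e.g.
# softmax(np.array([[1000.0, 1000.0]])) evaluates to [[0.5, 0.5]] instead of producing nan.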
class ClassificationFunction( ExplicitEnum ):
    '''simple docstring'''
    SIGMOID = """sigmoid"""
    SOFTMAX = """softmax"""
    NONE = """none"""
@add_end_docstrings(
A , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline( Pipeline ):
    '''simple docstring'''
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self : List[Any] , **UpperCamelCase_ : Tuple ) -> int:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any="" , **UpperCamelCase_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Optional[int] = tokenizer_kwargs
_lowercase : Optional[Any] = {}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
_lowercase : Union[str, Any] = self.model.config.return_all_scores
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) or top_k is None:
_lowercase : List[Any] = top_k
_lowercase : Tuple = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , UpperCamelCase_ , )
if return_all_scores:
_lowercase : Optional[int] = None
else:
_lowercase : int = 1
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_lowercase : Dict = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowercase : Optional[Any] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ) -> str:
'''simple docstring'''
_lowercase : Optional[int] = super().__call__(*UpperCamelCase_ , **UpperCamelCase_ )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowercase : Union[str, Any] = 'top_k' not in kwargs
if isinstance(args[0] , UpperCamelCase_ ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Union[str, Any] ) -> Dict[str, GenericTensor]:
'''simple docstring'''
_lowercase : Optional[Any] = self.framework
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return self.tokenizer(**UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1 and isinstance(inputs[0] , UpperCamelCase_ ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
return self.model(**UpperCamelCase_ )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ) -> Union[str, Any]:
        '''simple docstring'''
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
return dict_scores
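# Illustrative usage (the model name is an assumption, not taken from this file):
#   from transformers import pipeline
#   clf = pipeline('text-classification', model='distilbert-base-uncased-finetuned-sst-2-english')
#   clf('I love this movie!', top_k=None)  # scores for every label instead of just the top one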
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ) -> torch.Tensor:
    exp_x = torch.exp(x )
    A = torch.sum(exp_x, dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
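# Derivation note (added for clarity): for logits x this equals the entropy of softmax(x),
#   H = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i)),
# so k identical logits give H = log(k), the maximum-uncertainty case.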
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ) -> None:
        '''simple docstring'''
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ) -> None:
        '''simple docstring'''
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    '''simple docstring'''
    def __init__( self , message , exit_layer ) -> None:
        '''simple docstring'''
        self.message = message
        self.exit_layer = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : Dict = config.num_labels
_lowercase : Any = config.num_hidden_layers
_lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
_lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
_lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
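# Summary note (added for clarity): at inference time each highway head scores the intermediate
# hidden state, and once entropy(highway_logits) falls below early_exit_entropy[i] the encoder
# raises HighwayException to stop the forward pass at layer i + 1, which is the DeeBERT early exit.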
| 4 | 0 |
'''simple docstring'''
def is_palindrome( _lowercase ) -> bool:
    return str(_lowercase ) == str(_lowercase )[::-1]
def sum_reverse( _lowercase ) -> int:
    return int(_lowercase ) + int(str(_lowercase )[::-1] )
def solution( _lowercase = 1_0000 ) -> int:
    lychrel_nums = []
    for num in range(1, _lowercase ):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a )
            iterations += 1
            if is_palindrome(a ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
    print(F'''{solution() = }''')
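# Worked example (Project Euler 55): 349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337,
# a palindrome, so 349 is not Lychrel; 196 never yields a palindrome within 50 iterations and
# is counted. For the default limit of 10000, solution() returns 249.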
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
            ValueError , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , ) # "J'aime le camembert !"
        output = model(input_ids )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlnet'''] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlnet'''] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : int =logging.get_logger(__name__)
_A : Optional[int] ={
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """glpn"""
def __init__( self : Tuple , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : Any=[2, 2, 2, 2] , UpperCamelCase_ : int=[8, 4, 2, 1] , UpperCamelCase_ : int=[32, 64, 160, 256] , UpperCamelCase_ : Dict=[7, 3, 3, 3] , UpperCamelCase_ : Dict=[4, 2, 2, 2] , UpperCamelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCamelCase_ : Union[str, Any]=[4, 4, 4, 4] , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : str=0.02 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : str=1E-6 , UpperCamelCase_ : Union[str, Any]=64 , UpperCamelCase_ : Tuple=10 , UpperCamelCase_ : str=-1 , **UpperCamelCase_ : int , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
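        # Per-stage encoder hyperparameters: depths, sr_ratios, hidden_sizes,
        # patch_sizes, strides, mlp_ratios and num_attention_heads each carry
        # one entry per encoder block.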
_lowercase : Tuple = num_channels
_lowercase : List[Any] = num_encoder_blocks
_lowercase : List[str] = depths
_lowercase : Optional[Any] = sr_ratios
_lowercase : int = hidden_sizes
_lowercase : Union[str, Any] = patch_sizes
_lowercase : int = strides
_lowercase : Optional[int] = mlp_ratios
_lowercase : str = num_attention_heads
_lowercase : int = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : List[str] = initializer_range
_lowercase : Optional[Any] = drop_path_rate
_lowercase : int = layer_norm_eps
_lowercase : str = decoder_hidden_size
_lowercase : Optional[int] = max_depth
_lowercase : Optional[int] = head_in_index
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """markuplm"""
def __init__( self : int , UpperCamelCase_ : Optional[Any]=3_0522 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Tuple=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[str]=0 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : str=256 , UpperCamelCase_ : Optional[Any]=1024 , UpperCamelCase_ : Union[str, Any]=216 , UpperCamelCase_ : int=1001 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=50 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : List[Any] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : List[Any] = type_vocab_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Optional[Any] = position_embedding_type
_lowercase : str = use_cache
_lowercase : str = classifier_dropout
# additional properties
_lowercase : int = max_depth
_lowercase : Dict = max_xpath_tag_unit_embeddings
_lowercase : str = max_xpath_subs_unit_embeddings
_lowercase : List[str] = tag_pad_id
_lowercase : Optional[int] = subs_pad_id
_lowercase : Any = xpath_unit_hidden_size
| 4 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : Optional[int] =logging.get_logger(__name__)
_A : Optional[int] ={
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """bert"""
def __init__( self : int , UpperCamelCase_ : List[str]=3_0522 , UpperCamelCase_ : Dict=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Union[str, Any]=12 , UpperCamelCase_ : Dict=3072 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Union[str, Any]=512 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[str]=1E-12 , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Tuple , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : List[str] = hidden_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : Dict = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Tuple = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : str = position_embedding_type
_lowercase : Optional[int] = use_cache
_lowercase : str = classifier_dropout
class lowerCamelCase__ ( A ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_lowercase : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : Tuple = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowercase : Tuple = 4
_lowercase : Union[str, Any] = 48
_lowercase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : Dict = [6, 6, 6, 6]
_lowercase : Optional[int] = 60
_lowercase : List[str] = [6, 6, 6, 6]
_lowercase : Dict = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : str = 4
_lowercase : str = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowercase : str = 1
_lowercase : Tuple = 1
_lowercase : Dict = 126
_lowercase : Optional[int] = 7
_lowercase : List[Any] = 2_5_5.0
_lowercase : Tuple = ''
return config
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
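    # Translate an original Swin2SR checkpoint key into the Transformers naming scheme.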
if "patch_embed.proj" in name and "layers" not in name:
_lowercase : Tuple = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
_lowercase : Tuple = name.replace('layers', 'encoder.stages' )
if "residual_group.blocks" in name:
_lowercase : str = name.replace('residual_group.blocks', 'layers' )
if "attn.proj" in name:
_lowercase : str = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
_lowercase : List[Any] = name.replace('attn', 'attention.self' )
if "norm1" in name:
_lowercase : List[str] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
_lowercase : Tuple = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
_lowercase : int = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase : List[str] = name.replace('mlp.fc2', 'output.dense' )
if "q_bias" in name:
_lowercase : Optional[Any] = name.replace('q_bias', 'query.bias' )
if "k_bias" in name:
_lowercase : str = name.replace('k_bias', 'key.bias' )
if "v_bias" in name:
_lowercase : int = name.replace('v_bias', 'value.bias' )
if "cpb_mlp" in name:
_lowercase : Any = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
_lowercase : Union[str, Any] = name.replace('patch_embed.proj', 'patch_embed.projection' )
if name == "norm.weight":
_lowercase : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
_lowercase : List[Any] = 'layernorm.bias'
if "conv_first" in name:
_lowercase : Tuple = name.replace('conv_first', 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowercase : List[str] = name.replace('conv_last', 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowercase : Union[str, Any] = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
if "upsample.0" in name:
_lowercase : str = name.replace('upsample.0', 'upsample.convolution_0' )
if "upsample.2" in name:
_lowercase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' )
_lowercase : Optional[int] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_lowercase : Optional[Any] = name.replace('upsample.0.weight', 'upsample.conv.weight' )
_lowercase : str = name.replace('upsample.0.bias', 'upsample.conv.bias' )
else:
pass
else:
_lowercase : Tuple = 'swin2sr.' + name
return name
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
for key in orig_state_dict.copy().keys():
_lowercase : int = orig_state_dict.pop(_lowercase )
if "qkv" in key:
_lowercase : Tuple = key.split('.' )
_lowercase : Optional[Any] = int(key_split[1] )
_lowercase : Any = int(key_split[4] )
_lowercase : Optional[Any] = config.embed_dim
if "weight" in key:
_lowercase : Optional[int] = val[:dim, :]
_lowercase : int = val[dim : dim * 2, :]
_lowercase : int = val[-dim:, :]
else:
_lowercase : Optional[Any] = val[:dim]
_lowercase : Tuple = val[dim : dim * 2]
_lowercase : List[str] = val[-dim:]
pass
else:
_lowercase : List[Any] = val
return orig_state_dict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
_lowercase : Optional[Any] = get_config(_lowercase )
_lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
model.eval()
_lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
_lowercase : Any = convert_state_dict(_lowercase, _lowercase )
_lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase )
if len(_lowercase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
_lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
_lowercase : Tuple = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
_lowercase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
_lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
if config.num_channels == 1:
_lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowercase : Optional[int] = model(_lowercase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 512, 512] )
_lowercase : Tuple = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
_lowercase : int = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
_lowercase : Dict = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowercase : List[str] = torch.Size([1, 3, 512, 512] )
_lowercase : int = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowercase : Any = torch.Size([1, 3, 1024, 1024] )
_lowercase : Union[str, Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
print('Looks ok!' )
_lowercase : List[str] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_lowercase : int = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowercase )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
_A : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
_A : int =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_A : int ={
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict =[
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_A : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> list:
_lowercase : List[str] = word.split()
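    # Greedy packing: fill each line with as many words as fit in max_width,
    # fully justify it, and left-justify the final line.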
def justify(_lowercase, _lowercase, _lowercase ) -> str:
_lowercase : Dict = max_width - width
_lowercase : Tuple = len(_lowercase )
if len(_lowercase ) == 1:
            # if there is only one word on the line,
            # just insert overall_spaces_count spaces for the remainder of the line
return line[0] + " " * overall_spaces_count
else:
_lowercase : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_lowercase : str = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_lowercase : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
            # distribute the remaining spaces round-robin, starting from the leftmost words
for i in range(_lowercase ):
num_spaces_between_words_list[i] += 1
_lowercase : Union[str, Any] = []
for i in range(_lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowercase )
_lowercase : str = []
_lowercase : list[str] = []
_lowercase : Union[str, Any] = 0
for word in words:
if width + len(_lowercase ) + len(_lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowercase )
width += len(_lowercase )
else:
# justify the line and add it to result
answer.append(justify(_lowercase, _lowercase, _lowercase ) )
# reset new line and new width
_lowercase , _lowercase : Optional[Any] = [word], len(_lowercase )
_lowercase : Optional[int] = max_width - width - len(_lowercase )
answer.append(' '.join(_lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. A larger population may converge faster but uses more memory.
_A : List[Any] =2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_A : Optional[Any] =5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_A : Any =0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def __UpperCamelCase ( _lowercase, _lowercase ) -> tuple[str, float]:
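    # Fitness score: the number of positions where the candidate matches the target.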
_lowercase : List[Any] = len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] )
return (item, float(_lowercase ))
def __UpperCamelCase ( _lowercase, _lowercase ) -> tuple[str, str]:
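    # Single-point crossover: cut both parents at a random index and recombine the halves.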
_lowercase : List[str] = random.randint(0, len(_lowercase ) - 1 )
_lowercase : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:]
_lowercase : str = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : int = list(_lowercase )
if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
_lowercase : List[str] = random.choice(_lowercase )
return "".join(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, ) -> list[str]:
_lowercase : Tuple = []
# Generate more children proportionally to the fitness score.
_lowercase : List[Any] = int(parent_a[1] * 100 ) + 1
_lowercase : Any = 10 if child_n >= 10 else child_n
for _ in range(_lowercase ):
_lowercase : Dict = population_score[random.randint(0, _lowercase )][0]
_lowercase : str = crossover(parent_a[0], _lowercase )
# Append new string to the population list.
pop.append(mutate(_lowercase, _lowercase ) )
pop.append(mutate(_lowercase, _lowercase ) )
return pop
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase = True ) -> tuple[int, int, str]:
    # Verify that N_POPULATION is bigger than N_SELECTED.
if N_POPULATION < N_SELECTED:
_lowercase : Optional[Any] = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(_lowercase )
# Verify that the target contains no genes besides the ones inside genes variable.
_lowercase : Tuple = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_lowercase : List[Any] = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(_lowercase )
# Generate random starting population.
_lowercase : List[Any] = []
for _ in range(_lowercase ):
population.append(''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) )
# Just some logs to know what the algorithms is doing.
    _lowercase , _lowercase : str = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_lowercase : List[Any] = [evaluate(_lowercase, _lowercase ) for item in population]
# Check if there is a matching evolution.
_lowercase : Optional[Any] = sorted(_lowercase, key=lambda _lowercase : x[1], reverse=_lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_lowercase : List[str] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowercase )
# Normalize population score to be between 0 and 1.
_lowercase : List[str] = [
(item, score / len(_lowercase )) for item, score in population_score
]
        # Selection: breed new children from the best-scoring candidates of this generation.
for i in range(_lowercase ):
population.extend(select(population_score[int(_lowercase )], _lowercase, _lowercase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(_lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
_A : List[Any] =(
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
_A : Optional[Any] =list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
_A : List[Any] =basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 721 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(_lowercase ):
_lowercase : Optional[int] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_lowercase )[1] in (".py", ".ipynb"):
yield os.path.join(_lowercase, _lowercase ).lstrip('./' )
def __UpperCamelCase ( _lowercase ) -> List[str]:
return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( _lowercase, _lowercase ) -> str:
_lowercase : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_lowercase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_lowercase )} {new_part.replace("_", " " ).title()}''' )
return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
_lowercase : Dict = ''
for filepath in sorted(good_file_paths(_lowercase ) ):
_lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
if filepath != old_path:
_lowercase : Dict = print_path(_lowercase, _lowercase )
_lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowercase : Dict = f'''{filepath}/{filename}'''.replace(' ', '%20' )
_lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
print(f'''{md_prefix(_lowercase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
| 4 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
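# These tests exercise `evaluate`, the restricted Python interpreter that
# Transformers tools/agents use to run generated code.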
def lowercase_ ( _A : str ):
"""simple docstring"""
return x + 2
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = "x = 3"
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : Tuple = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
assert result == 3
self.assertDictEqual(__lowerCamelCase , {"x": 3} )
lowerCamelCase__ : Dict = "x = y"
lowerCamelCase__ : List[Any] = {"y": 5}
lowerCamelCase__ : List[Any] = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__lowerCamelCase , {"x": 5, "y": 5} )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Tuple = "y = add_two(x)"
lowerCamelCase__ : Dict = {"x": 3}
lowerCamelCase__ : Optional[int] = evaluate(__lowerCamelCase , {"add_two": add_two} , state=__lowerCamelCase )
assert result == 5
self.assertDictEqual(__lowerCamelCase , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase__ : Union[str, Any] = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = "x = 3"
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : Tuple = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
assert result == 3
self.assertDictEqual(__lowerCamelCase , {"x": 3} )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : List[str] = "test_dict = {'x': x, 'y': add_two(x)}"
lowerCamelCase__ : List[Any] = {"x": 3}
lowerCamelCase__ : Any = evaluate(__lowerCamelCase , {"add_two": add_two} , state=__lowerCamelCase )
self.assertDictEqual(__lowerCamelCase , {"x": 3, "y": 5} )
self.assertDictEqual(__lowerCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : str = "x = 3\ny = 5"
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : Any = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__lowerCamelCase , {"x": 3, "y": 5} )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = "text = f'This is x: {x}.'"
lowerCamelCase__ : Union[str, Any] = {"x": 3}
lowerCamelCase__ : Optional[int] = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__lowerCamelCase , {"x": 3, "text": "This is x: 3."} )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Tuple = "if x <= 3:\n y = 2\nelse:\n y = 5"
lowerCamelCase__ : Any = {"x": 3}
lowerCamelCase__ : Union[str, Any] = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__lowerCamelCase , {"x": 3, "y": 2} )
lowerCamelCase__ : Union[str, Any] = {"x": 8}
lowerCamelCase__ : Any = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__lowerCamelCase , {"x": 8, "y": 5} )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : str = "test_list = [x, add_two(x)]"
lowerCamelCase__ : Dict = {"x": 3}
lowerCamelCase__ : List[str] = evaluate(__lowerCamelCase , {"add_two": add_two} , state=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [3, 5] )
self.assertDictEqual(__lowerCamelCase , {"x": 3, "test_list": [3, 5]} )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Any = "y = x"
lowerCamelCase__ : Any = {"x": 3}
lowerCamelCase__ : Optional[int] = evaluate(__lowerCamelCase , {} , state=__lowerCamelCase )
assert result == 3
self.assertDictEqual(__lowerCamelCase , {"x": 3, "y": 3} )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[str] = "test_list = [x, add_two(x)]\ntest_list[1]"
lowerCamelCase__ : int = {"x": 3}
lowerCamelCase__ : Union[str, Any] = evaluate(__lowerCamelCase , {"add_two": add_two} , state=__lowerCamelCase )
assert result == 5
self.assertDictEqual(__lowerCamelCase , {"x": 3, "test_list": [3, 5]} )
lowerCamelCase__ : List[str] = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
lowerCamelCase__ : Optional[Any] = {"x": 3}
lowerCamelCase__ : Optional[int] = evaluate(__lowerCamelCase , {"add_two": add_two} , state=__lowerCamelCase )
assert result == 5
self.assertDictEqual(__lowerCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = "x = 0\nfor i in range(3):\n x = i"
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : int = evaluate(__lowerCamelCase , {"range": range} , state=__lowerCamelCase )
assert result == 2
self.assertDictEqual(__lowerCamelCase , {"x": 2, "i": 2} )
| 5 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = KandinskyVaaImgaImgPipeline
A__ = ["image_embeds", "negative_image_embeds", "image"]
A__ = [
"image_embeds",
"negative_image_embeds",
"image",
]
A__ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
A__ = False
@property
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = {
"in_channels": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.dummy_unet
lowerCamelCase__ : Optional[Any] = self.dummy_movq
lowerCamelCase__ : Optional[int] = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : int=0 ):
'''simple docstring'''
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) )
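        # Device-specific torch.Generator is not supported on MPS, so seed the
        # default generator there instead.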
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase__ : Tuple = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : Dict = "cpu"
lowerCamelCase__ : str = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase )
lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
lowerCamelCase__ : List[str] = output.images
lowerCamelCase__ : Optional[Any] = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : str = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
lowerCamelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
lowerCamelCase__ : Any = "A red cartoon frog, 4k"
lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCamelCase )
lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCamelCase__ : str = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCamelCase__ : Optional[Any] = pipeline(
image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
lowerCamelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 5 | 1 |