| code (string, length 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, length 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    # Rescale from [-1, 1] to [0, 1], move channels last, and hand off to numpy_to_pil.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    # Convert a numpy image batch with values in [0, 1] to a list of PIL images.
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
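# A minimal usage sketch of the two helpers above (an illustration, not part of
# the original module; assumes torch is installed):
#   import torch
#   batch = torch.rand(2, 3, 64, 64) * 2 - 1   # fake pipeline output in [-1, 1]
#   pils = pt_to_pil(batch)                    # -> list of 2 PIL.Image objects
#   pils[0].size                               # -> (64, 64)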
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPix2PixPipeline,
UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPix2PixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.float32 ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(x ) for x in __SCREAMING_SNAKE_CASE] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
callback_fn.has_been_called = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.float16 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.float16 )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPix2PixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config , input_ids , attention_mask=None , head_mask=None ):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = OPTConfig
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
SCREAMING_SNAKE_CASE__ : Dict = '''gelu'''
def __init__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=13 , lowerCAmelCase__ :List[Any]=7 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Optional[int]=99 , lowerCAmelCase__ :int=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Any=20 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :List[Any]=1 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :Optional[Any]=16 , lowerCAmelCase__ :int=16 , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : Any = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : Tuple = is_training
__SCREAMING_SNAKE_CASE : Tuple = use_labels
__SCREAMING_SNAKE_CASE : Tuple = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : int = intermediate_size
__SCREAMING_SNAKE_CASE : Dict = hidden_act
__SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Any = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[Any] = eos_token_id
__SCREAMING_SNAKE_CASE : int = pad_token_id
__SCREAMING_SNAKE_CASE : List[Any] = bos_token_id
__SCREAMING_SNAKE_CASE : Optional[int] = embed_dim
__SCREAMING_SNAKE_CASE : Optional[int] = word_embed_proj_dim
__SCREAMING_SNAKE_CASE : str = False
def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__SCREAMING_SNAKE_CASE : int = tf.concat([input_ids, eos_tensor] , axis=1 )
__SCREAMING_SNAKE_CASE : Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase__ , **self.config_updates , )
__SCREAMING_SNAKE_CASE : Any = prepare_opt_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def __magic_name__( self :Any , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = TFOPTModel(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict['''input_ids''']
__SCREAMING_SNAKE_CASE : Optional[int] = input_ids[:1, :]
__SCREAMING_SNAKE_CASE : List[str] = inputs_dict['''attention_mask'''][:1, :]
__SCREAMING_SNAKE_CASE : Tuple = 1
# first forward pass
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
__SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
__SCREAMING_SNAKE_CASE : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__SCREAMING_SNAKE_CASE : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__SCREAMING_SNAKE_CASE : Any = output_from_no_past[:, -3:, random_slice_idx]
__SCREAMING_SNAKE_CASE : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-3 )
@require_tf
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : str = (TFOPTForCausalLM,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : List[Any] = 10
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFOPTModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ )
def __magic_name__( self :str ) -> Optional[int]:
self.config_tester.run_common_tests()
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] ):
if hasattr(lowerCAmelCase__ , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase__ , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = _get_word_embedding_weight(lowerCAmelCase__ , model.get_input_embeddings() )
__SCREAMING_SNAKE_CASE : int = _get_word_embedding_weight(lowerCAmelCase__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = _get_word_embedding_weight(lowerCAmelCase__ , model.get_input_embeddings() )
__SCREAMING_SNAKE_CASE : str = _get_word_embedding_weight(lowerCAmelCase__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase__ )
# check that weights remain the same after resizing
models_equal = True
for pa, pb in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
models_equal = False
self.assertTrue(models_equal )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase__ )
models_equal = True
for pa, pb in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
models_equal = False
self.assertTrue(models_equal )
def _long_tensor(tok_lst ):
return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 99
def __magic_name__( self :Optional[Any] ) -> str:
eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
batch_size = input_ids.shape[0]
config = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__SCREAMING_SNAKE_CASE : List[str] = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__SCREAMING_SNAKE_CASE : str = tf.not_equal(lowerCAmelCase__ , model.config.pad_token_id )
with tf.GradientTape():
__SCREAMING_SNAKE_CASE : List[Any] = model(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE : str = (1, 11, 512)
self.assertEqual(output.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=4E-3 ) )
__SCREAMING_SNAKE_CASE : Optional[int] = tf.function(lowerCAmelCase__ , jit_compile=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = xla_generate(lowerCAmelCase__ , lowerCAmelCase__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=4E-2 ) )
@require_tf
@slow
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Any ) -> Tuple:
super().setUp()
__SCREAMING_SNAKE_CASE : Dict = '''facebook/opt-350m'''
def __magic_name__( self :List[str] ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = TFOPTForCausalLM.from_pretrained(self.path_model )
__SCREAMING_SNAKE_CASE : Dict = GPT2Tokenizer.from_pretrained(self.path_model )
__SCREAMING_SNAKE_CASE : List[Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__SCREAMING_SNAKE_CASE : Any = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 ) )
__SCREAMING_SNAKE_CASE : Any = tf.function(lowerCAmelCase__ , jit_compile=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 ) )
@require_tf
@slow
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def __magic_name__( self :str ) -> Tuple:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = '''facebook/opt-125m'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : Tuple = GPT2Tokenizer.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
for prompt in self.prompts:
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' ).input_ids
__SCREAMING_SNAKE_CASE : List[str] = model.generate(lowerCAmelCase__ , max_length=10 )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : str = '''facebook/opt-350m'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = GPT2Tokenizer.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = '''left'''
# use different length sentences to test batching
__SCREAMING_SNAKE_CASE : Any = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__SCREAMING_SNAKE_CASE : int = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' , padding=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = inputs['''input_ids''']
__SCREAMING_SNAKE_CASE : int = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs['''attention_mask'''] )
__SCREAMING_SNAKE_CASE : int = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__SCREAMING_SNAKE_CASE : Any = model.generate(input_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.int64 ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__SCREAMING_SNAKE_CASE : Any = model.generate(input_ids=lowerCAmelCase__ , max_length=model.config.max_length - num_paddings )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
def __magic_name__( self :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''facebook/opt-350m'''
__SCREAMING_SNAKE_CASE : Any = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : int = GPT2Tokenizer.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
for prompt in self.prompts:
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' ).input_ids
__SCREAMING_SNAKE_CASE : Optional[int] = model.generate(lowerCAmelCase__ , max_length=10 )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( A__ ):
'''simple docstring'''
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
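# Sanity check (a sketch, not part of the original file): with the cosine
# transform, alpha_bar decays from ~1 at t=0 toward ~0 at t=1, so every
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i) is positive and capped at max_beta:
#   betas = betas_for_alpha_bar(10)
#   assert all(0 < b <= 0.999 for b in betas)
#   assert betas[0] < betas[-1]  # noise level grows toward the end of the schedule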
class DDIMInverseScheduler( SchedulerMixin , ConfigMixin ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.float32 )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.int64 ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.int64 )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
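# Spelled out for the epsilon case (DDIM eq. (12), https://arxiv.org/pdf/2010.02502.pdf):
#   x0_pred = (x_t - sqrt(1 - a_t) * eps_pred) / sqrt(a_t)
# and the two steps below reassemble the deterministic (eta = 0) update
#   x_{t+1} = sqrt(a_{t+1}) * x0_pred + sqrt(1 - a_{t+1}) * eps_pred,
# which is the forward direction of DDIM used by this inverted scheduler.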
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
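# A minimal usage sketch (illustrative only; ``unet`` and ``sample`` are assumed
# to exist, and the scheduler's method names above remain obfuscated):
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample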
| 696 | 1 |
def equated_monthly_installments(principal , rate_per_annum , years_to_repay ):
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''' )
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('''Years to repay must be an integer > 0''' )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
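# Worked example (a sketch; values rounded): a 25,000 loan at 12% per annum over
# 3 years gives rate_per_month = 0.01 and 36 payments, so
#   equated_monthly_installments(25000, 0.12, 3)
#   = 25000 * 0.01 * 1.01**36 / (1.01**36 - 1)  ~= 830.36 per month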
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class Summarization(TaskTemplate ):
'''simple docstring'''
task: str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
label_schema: ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
text_column: str = "text"
summary_column: str = "summary"
@property
def __magic_name__( self :Union[str, Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
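# A minimal usage sketch (the column names here are illustrative):
#   task = Summarization(text_column='''article''' , summary_column='''highlights''' )
#   task.column_mapping  # -> {'article': 'text', 'highlights': 'summary'}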
| 696 | 1 |
def alternative_string_arrange(first_str , second_str ):
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 696 |
def solution(max_perimeter = 10**9 ):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
class SageMakerTrainer(Trainer ):
'''simple docstring'''
def __init__( self , args=None , **kwargs ) -> List[Any]:
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , FutureWarning , )
super().__init__(args=args , **kwargs )
| 696 |
def abbr(a , b ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
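# dp[i][j] is True when the first i characters of ``a`` can be turned into the
# first j characters of ``b`` by upper-casing some lowercase letters and deleting
# the remaining lowercase ones, e.g.
#   abbr('daBcd', 'ABC')  ->  True   ('daBcd' -> 'dABCd' -> 'ABC')
#   abbr('dBcd', 'ABC')   ->  False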
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Dict=99 , lowerCAmelCase__ :Any=64 , lowerCAmelCase__ :Tuple=32 , lowerCAmelCase__ :List[Any]=5 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :List[Any]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Union[str, Any]=512 , lowerCAmelCase__ :Dict=16 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :Optional[Any]=0.02 , lowerCAmelCase__ :List[str]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Optional[Any]=None , ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
__SCREAMING_SNAKE_CASE : Any = seq_length
__SCREAMING_SNAKE_CASE : List[Any] = is_training
__SCREAMING_SNAKE_CASE : Dict = use_input_mask
__SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids
__SCREAMING_SNAKE_CASE : Optional[int] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : int = embedding_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Any = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
__SCREAMING_SNAKE_CASE : str = type_vocab_size
__SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : List[str] = num_choices
__SCREAMING_SNAKE_CASE : Dict = scope
def __magic_name__( self :str ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : int = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Tuple = None
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__( self :Optional[int] ) -> Dict:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Tuple = MobileBertModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = MobileBertForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = MobileBertForNextSentencePrediction(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = MobileBertForPreTraining(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , next_sentence_label=lowerCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple ) -> List[str]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = MobileBertForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[int] = MobileBertForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int ) -> int:
__SCREAMING_SNAKE_CASE : Any = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = MobileBertForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = self.num_choices
__SCREAMING_SNAKE_CASE : str = MobileBertForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Optional[int] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__( self :Optional[int] ) -> str:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Optional[int] = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def __magic_name__( self :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any=False ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__( self :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : int = MobileBertModelTester(self )
__SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__( self :Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Any:
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCAmelCase__ )
def __magic_name__( self :Any ) -> List[str]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCAmelCase__ )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCAmelCase__ )
def _UpperCamelCase ( lowercase__ ):
    return torch.tensor(
        lowercase__ , dtype=torch.long , device=torch_device , )
__lowerCAmelCase : Any =1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :Tuple ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[
[
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
]
] , device=lowerCAmelCase__ , )
        # MobileBERT results range from 1e0 to 1e8. Even a 0.0000001% relative difference on a value of
        # 1e8 yields an absolute difference of ~1, so it is not a good idea to measure closeness by
        # subtraction. Here, we instead divide the expected result by the actual result to obtain ~1,
        # and check that the ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__SCREAMING_SNAKE_CASE : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
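# Hedged standalone sketch of the ratio-based closeness check used in the test
# above: because the activations span several orders of magnitude, we bound
# expected / actual around 1 instead of comparing absolute differences.
# The tensor values below are illustrative, not taken from the test suite.
import torch

_expected = torch.tensor([1.0e8, 2.5e0, -3.0e-1])
_actual = _expected * (1.0 + 1.0e-4)  # simulate a tiny relative deviation
_ratio = _expected / _actual
assert bool(torch.all(_ratio >= 1 - 1e-3)) and bool(torch.all(_ratio <= 1 + 1e-3))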
| 696 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but it also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int:
if return_pvalue:
__SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
| 696 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
        __SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
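# Hedged sketch of the sequence-pair layout the two methods above produce:
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment
# (including [CLS] and its [SEP]) and 1 over the second. Token ids are made up.
_cls, _sep = 101, 102
_ids_a, _ids_b = [7, 8], [9]
_input_ids = [_cls] + _ids_a + [_sep] + _ids_b + [_sep]
_token_type_ids = [0] * (len(_ids_a) + 2) + [1] * (len(_ids_b) + 1)
assert len(_input_ids) == len(_token_type_ids) == 6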
| 696 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Any =get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = BartphoTokenizer
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def __magic_name__( self :Dict ) -> Dict:
super().setUp()
__SCREAMING_SNAKE_CASE : str = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
__SCREAMING_SNAKE_CASE : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Tuple = {'''unk_token''': '''<unk>'''}
__SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
__SCREAMING_SNAKE_CASE : Tuple = BartphoTokenizer(lowerCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :str , lowerCAmelCase__ :List[str] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Any = '''This is a là test'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''This is a<unk><unk> test'''
return input_text, output_text
def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = BartphoTokenizer(lowerCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map )
__SCREAMING_SNAKE_CASE : Tuple = '''This is a là test'''
__SCREAMING_SNAKE_CASE : List[Any] = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
__SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = tokens + [tokenizer.unk_token]
__SCREAMING_SNAKE_CASE : Optional[int] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
| 696 |
import os
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = len(grid[0] )
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Dict = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
    for i in range(n_columns ):
for j in range(n_rows - 3 ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
            __SCREAMING_SNAKE_CASE : Optional[int] = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
if max_product > largest:
__SCREAMING_SNAKE_CASE : Tuple = max_product
return largest
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[int] = []
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
    __SCREAMING_SNAKE_CASE : str = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
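# Hedged sanity check for largest_product on a tiny hand-made grid: in a 4x4
# grid of ones with a single row of twos, the best run of four adjacent
# numbers is that row, so the expected maximum product is 2 ** 4 == 16.
_tiny = [[1, 1, 1, 1], [2, 2, 2, 2], [1, 1, 1, 1], [1, 1, 1, 1]]
# assert largest_product(_tiny) == 16  # holds for a correct implementation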
| 696 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = '''longformer'''
def __init__( self :List[Any] , lowerCAmelCase__ :Union[List[int], int] = 512 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 30_522 , lowerCAmelCase__ :int = 768 , lowerCAmelCase__ :int = 12 , lowerCAmelCase__ :int = 12 , lowerCAmelCase__ :int = 3_072 , lowerCAmelCase__ :str = "gelu" , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :int = 512 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :float = 1E-1_2 , lowerCAmelCase__ :bool = False , **lowerCAmelCase__ :Union[str, Any] , ) -> Optional[int]:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_window
__SCREAMING_SNAKE_CASE : str = sep_token_id
__SCREAMING_SNAKE_CASE : Tuple = bos_token_id
__SCREAMING_SNAKE_CASE : Any = eos_token_id
__SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : str = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : List[str] = initializer_range
__SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[Any] = onnx_export
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :"PretrainedConfig" , lowerCAmelCase__ :str = "default" , lowerCAmelCase__ :"List[PatchingSpec]" = None ) -> Tuple:
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = True
@property
def __magic_name__( self :Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def __magic_name__( self :str ) -> Mapping[str, Mapping[int, str]]:
__SCREAMING_SNAKE_CASE : Optional[Any] = super().outputs
if self.task == "default":
__SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch'''}
return outputs
@property
def __magic_name__( self :str ) -> float:
return 1E-4
@property
def __magic_name__( self :Union[str, Any] ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :"PreTrainedTokenizerBase" , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
__SCREAMING_SNAKE_CASE : int = super().generate_dummy_inputs(
preprocessor=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
__SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
__SCREAMING_SNAKE_CASE : str = 1
return inputs
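# Minimal sketch of the global-attention pattern built in generate_dummy_inputs
# above: start from zeros (purely local attention) and flag every second token
# as global. The shapes used here are illustrative.
import torch

_input_ids = torch.randint(0, 100, (2, 8))
_global_attention_mask = torch.zeros_like(_input_ids)
_global_attention_mask[:, ::2] = 1  # every second token attends globally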
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
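# Hedged sketch of the shape bookkeeping the model test above asserts: with the
# tester's image_size of 32 and output_stride of 8, the last hidden state is
# 4x4 spatially (image_size // output_stride per side).
_image_size, _output_stride = 32, 8  # values from MobileNetVaModelTester above
assert _image_size // _output_stride == 4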
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] ={
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : str =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
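# The availability-guard pattern above in miniature: register heavy symbols in
# the import structure only when their backend actually imports. The module
# probed and the symbols listed here are illustrative, not the real mapping.
import importlib.util

_import_structure_sketch = {"configuration": ["ASTConfig"]}
if importlib.util.find_spec("torch") is not None:
    _import_structure_sketch["modeling"] = ["ASTModel"]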
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
__SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
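# Minimal sketch of the inactivity arithmetic the bot's rules hinge on:
# subtracting two datetimes yields a timedelta whose .days field drives the
# 7 / 23 / 30 day thresholds above. The timestamps here are fabricated.
from datetime import datetime, timedelta

_updated_at = datetime.utcnow() - timedelta(days=24)
assert (datetime.utcnow() - _updated_at).days > 23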
| 696 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase : List[str] =random.Random()
def _UpperCamelCase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
if rng is None:
__SCREAMING_SNAKE_CASE : Tuple = global_rng
__SCREAMING_SNAKE_CASE : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=7 , lowerCAmelCase__ :Any=400 , lowerCAmelCase__ :Optional[int]=2_000 , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Tuple=160 , lowerCAmelCase__ :Any=8 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=4_000 , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :str=True , ) -> int:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = min_seq_length
__SCREAMING_SNAKE_CASE : Dict = max_seq_length
__SCREAMING_SNAKE_CASE : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__SCREAMING_SNAKE_CASE : Optional[Any] = padding_value
__SCREAMING_SNAKE_CASE : Optional[Any] = sampling_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = return_attention_mask
__SCREAMING_SNAKE_CASE : List[str] = do_normalize
__SCREAMING_SNAKE_CASE : Any = feature_size
__SCREAMING_SNAKE_CASE : List[str] = chunk_length
__SCREAMING_SNAKE_CASE : Any = hop_length
def __magic_name__( self :Optional[int] ) -> Tuple:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[str]=False ) -> List[str]:
def _flatten(lowerCAmelCase__ :Union[str, Any] ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
__SCREAMING_SNAKE_CASE : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__SCREAMING_SNAKE_CASE : str = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__SCREAMING_SNAKE_CASE : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = WhisperFeatureExtractor if is_speech_available() else None
def __magic_name__( self :Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = WhisperFeatureExtractionTester(self )
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : List[Any] = feat_extract_first.save_pretrained(lowerCAmelCase__ )[0]
check_json_file_has_correct_format(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = feat_extract_first.to_dict()
__SCREAMING_SNAKE_CASE : Dict = feat_extract_second.to_dict()
__SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract_first.mel_filters
__SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(lowerCAmelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.feature_extraction_class.from_json_file(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = feat_extract_first.to_dict()
__SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict()
__SCREAMING_SNAKE_CASE : Any = feat_extract_first.mel_filters
__SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
__SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__SCREAMING_SNAKE_CASE : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__SCREAMING_SNAKE_CASE : List[str] = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
__SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCAmelCase__ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
__SCREAMING_SNAKE_CASE : int = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
__SCREAMING_SNAKE_CASE : str = feature_extractor(lowerCAmelCase__ , return_tensors='''np''' ).input_features
__SCREAMING_SNAKE_CASE : Any = feature_extractor(lowerCAmelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__SCREAMING_SNAKE_CASE : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__SCREAMING_SNAKE_CASE : Dict = np.asarray(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = feature_extractor(lowerCAmelCase__ , return_tensors='''np''' ).input_features
__SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(lowerCAmelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test truncation required
__SCREAMING_SNAKE_CASE : List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
__SCREAMING_SNAKE_CASE : Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
__SCREAMING_SNAKE_CASE : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs_truncated]
__SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCAmelCase__ , return_tensors='''np''' ).input_features
__SCREAMING_SNAKE_CASE : int = feature_extractor(lowerCAmelCase__ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
import torch
__SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__SCREAMING_SNAKE_CASE : str = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __magic_name__( self :str , lowerCAmelCase__ :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__SCREAMING_SNAKE_CASE : str = ds.sort('''id''' ).select(range(lowerCAmelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __magic_name__( self :str ) -> List[str]:
# fmt: off
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__SCREAMING_SNAKE_CASE : List[Any] = self._load_datasamples(1 )
__SCREAMING_SNAKE_CASE : List[str] = WhisperFeatureExtractor()
__SCREAMING_SNAKE_CASE : int = feature_extractor(lowerCAmelCase__ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
def __magic_name__( self :Optional[int] ) -> Tuple:
__SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE : List[str] = self._load_datasamples(1 )[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
__SCREAMING_SNAKE_CASE : int = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCAmelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCAmelCase__ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ ) - 1 ) < 1E-3 ) )
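# Hedged sketch of the zero-mean unit-variance normalization exercised in the
# last test above: rescaling the input (even to [0, 65535]) should not matter
# once normalized. The epsilon guarding the division is an assumption here,
# not necessarily the constant the extractor uses internally.
import numpy as np

_x = np.random.rand(4_000).astype(np.float32) * 65_535
_normed = (_x - _x.mean()) / np.sqrt(_x.var() + 1e-7)
assert abs(float(_normed.mean())) < 1e-3
assert abs(float(_normed.var()) - 1) < 1e-3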
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''canine'''
def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : int = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
# Character config:
__SCREAMING_SNAKE_CASE : Tuple = downsampling_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size
__SCREAMING_SNAKE_CASE : Any = num_hash_functions
__SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets
__SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
| 696 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = int(lowercase__ )
    assert noofclusters < len(vectors )
# Find out the dimensionality
__SCREAMING_SNAKE_CASE : Tuple = len(vectors[0] )
# Will help select random centroids from among the available vectors
    __SCREAMING_SNAKE_CASE : Optional[Any] = list(range(len(vectors ) ) )
    shuffle(vector_indices )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__SCREAMING_SNAKE_CASE : Dict = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__SCREAMING_SNAKE_CASE : List[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__SCREAMING_SNAKE_CASE : str = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
]
##These nodes will assign the centroid Variables the appropriate
##values
__SCREAMING_SNAKE_CASE : Tuple = tf.placeholder('''float64''' , [dim] )
__SCREAMING_SNAKE_CASE : List[Any] = []
for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
        __SCREAMING_SNAKE_CASE : Optional[Any] = [tf.Variable(0 ) for i in range(len(vectors ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__SCREAMING_SNAKE_CASE : int = tf.placeholder('''int32''' )
__SCREAMING_SNAKE_CASE : Tuple = []
for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__SCREAMING_SNAKE_CASE : str = tf.reduce_mean(lowercase__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__SCREAMING_SNAKE_CASE : Any = tf.placeholder('''float''' , [dim] )
__SCREAMING_SNAKE_CASE : Tuple = tf.placeholder('''float''' , [dim] )
        __SCREAMING_SNAKE_CASE : List[str] = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__SCREAMING_SNAKE_CASE : int = tf.placeholder('''float''' , [noofclusters] )
__SCREAMING_SNAKE_CASE : List[str] = tf.argmin(lowercase__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__SCREAMING_SNAKE_CASE : Tuple = tf.initialize_all_variables()
# Initialize all variables
        sess.run(init_op )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__SCREAMING_SNAKE_CASE : Optional[Any] = 100
        for _ in range(noofiterations ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
            for vector_n in range(len(vectors ) ):
__SCREAMING_SNAKE_CASE : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__SCREAMING_SNAKE_CASE : Optional[int] = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
                __SCREAMING_SNAKE_CASE : Tuple = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
# Collect all the vectors assigned to this cluster
__SCREAMING_SNAKE_CASE : Dict = [
vectors[i]
                    for i in range(len(vectors ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
                __SCREAMING_SNAKE_CASE : Tuple = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
        __SCREAMING_SNAKE_CASE : Any = sess.run(centroids )
        __SCREAMING_SNAKE_CASE : Any = sess.run(assignments )
return centroids, assignments
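# The same expectation-maximization loop as above in a compact NumPy sketch,
# free of the TF1 session machinery. Function and argument names mirror the
# TF version; this is an illustration, not a drop-in replacement.
import numpy as np

def _numpy_kmeans(vectors, noofclusters, noofiterations=100):
    vectors = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    centroids = vectors[rng.choice(len(vectors), noofclusters, replace=False)]
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(noofiterations):
        # Expectation: assign each vector to its nearest centroid
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # Maximization: move each centroid to the mean of its members
        for k in range(noofclusters):
            members = vectors[assignments == k]
            if len(members):
                centroids[k] = members.mean(axis=0)
    return centroids, assignments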
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''mems''']
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = []
self.cutoffs.extend(lowerCAmelCase__ )
if proj_share_all_but_first:
__SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs )
else:
__SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs )
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed
__SCREAMING_SNAKE_CASE : Tuple = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Optional[Any] = div_val
__SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm
__SCREAMING_SNAKE_CASE : List[str] = n_layer
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : str = mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = same_length
__SCREAMING_SNAKE_CASE : str = attn_type
__SCREAMING_SNAKE_CASE : Dict = clamp_len
__SCREAMING_SNAKE_CASE : Tuple = sample_softmax
__SCREAMING_SNAKE_CASE : Optional[int] = adaptive
__SCREAMING_SNAKE_CASE : int = dropout
__SCREAMING_SNAKE_CASE : Optional[Any] = dropatt
__SCREAMING_SNAKE_CASE : int = untie_r
__SCREAMING_SNAKE_CASE : Optional[int] = init
__SCREAMING_SNAKE_CASE : List[str] = init_range
__SCREAMING_SNAKE_CASE : Any = proj_init_std
__SCREAMING_SNAKE_CASE : List[str] = init_std
__SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
    @property
    def max_position_embeddings( self ) -> int:
        # Message copied from Transformer-XL documentation
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
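# A minimal usage sketch (the sizes below are illustrative, not defaults): the
# attribute_map above lets generic config names resolve to Transformer-XL ones.
#
#     config = TransfoXLConfig(d_model=512, n_head=8)
#     config.hidden_size              # 512, resolved through attribute_map to d_model
#     config.num_attention_heads      # 8, resolved to n_head
#     config.max_position_embeddings  # -1: no sequence length limit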
| 696 | 1 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
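# Hedged usage sketch -- the script name and the three paths below are
# placeholders, not files shipped with this module:
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin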
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''megatron-bert'''

    def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
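# Minimal usage sketch (the sizes here are illustrative overrides, not defaults):
#
#     config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)
#     config.vocab_size  # 29056, the default defined above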
| 696 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor ( ProcessorMixin ):
    '''simple docstring'''

    feature_extractor_class = '''Speech2TextFeatureExtractor'''
    tokenizer_class = '''Speech2TextTokenizer'''

    def __init__( self , feature_extractor , tokenizer ) -> None:
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
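# Hedged usage sketch: `extractor`, `tok`, and `waveform` stand in for real
# Speech2TextFeatureExtractor / Speech2TextTokenizer instances and a 1-D float
# array loaded elsewhere.
#
#     processor = Speech2TextProcessor(extractor, tok)
#     batch = processor(audio=waveform, sampling_rate=16_000, text="transcript")
#     batch["labels"]  # tokenized transcript, added by the else-branch above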
| 696 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tokenizers''' )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' )
__SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' )
__SCREAMING_SNAKE_CASE : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' )
__SCREAMING_SNAKE_CASE : List[str] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCAmelCase__ )
self.assertIn('''tensorflow_text''' , lowerCAmelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__SCREAMING_SNAKE_CASE : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
| 696 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/beit-base-patch16-224-pt22k': (
        'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''beit'''

    def __init__( self , vocab_size=8_192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig ( OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
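# Minimal pairing sketch for the two classes above:
#
#     config = BeitConfig()
#     onnx_config = BeitOnnxConfig(config)
#     list(onnx_config.inputs)         # ['pixel_values']
#     onnx_config.atol_for_validation  # 1e-4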
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma ( num ):
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num,) )[0]


def integrand ( x , z ):
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
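    # Quick sanity check beyond the doctests: gamma(5) = 4! = 24.
    print(gamma(5))  # ~24.0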
| 696 | 1 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class _lowercase ( TestCasePlus ):
'''simple docstring'''
@require_torch
def __magic_name__( self :Any ) -> Optional[int]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__SCREAMING_SNAKE_CASE : Optional[int] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
__SCREAMING_SNAKE_CASE : Optional[int] = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
__SCREAMING_SNAKE_CASE : List[str] = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
__SCREAMING_SNAKE_CASE : Dict = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(lowerCAmelCase__ )
BertModel.from_pretrained(lowerCAmelCase__ )
BertTokenizer.from_pretrained(lowerCAmelCase__ )
pipeline(task='''fill-mask''' , model=lowerCAmelCase__ )
# baseline - just load from_pretrained with normal network
__SCREAMING_SNAKE_CASE : Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__SCREAMING_SNAKE_CASE : Optional[int] = '''1'''
__SCREAMING_SNAKE_CASE : str = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __magic_name__( self :Optional[Any] ) -> Optional[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__SCREAMING_SNAKE_CASE : str = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
__SCREAMING_SNAKE_CASE : Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(lowerCAmelCase__ )
BertModel.from_pretrained(lowerCAmelCase__ )
BertTokenizer.from_pretrained(lowerCAmelCase__ )
pipeline(task='''fill-mask''' , model=lowerCAmelCase__ )
# baseline - just load from_pretrained with normal network
__SCREAMING_SNAKE_CASE : str = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
__SCREAMING_SNAKE_CASE : Dict = self.get_env()
__SCREAMING_SNAKE_CASE : Any = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __magic_name__( self :List[Any] ) -> str:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
__SCREAMING_SNAKE_CASE : Tuple = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
__SCREAMING_SNAKE_CASE : Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
__SCREAMING_SNAKE_CASE : Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
__SCREAMING_SNAKE_CASE : Tuple = self.get_env()
__SCREAMING_SNAKE_CASE : List[str] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
__SCREAMING_SNAKE_CASE : List[str] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__SCREAMING_SNAKE_CASE : Dict = '''1'''
__SCREAMING_SNAKE_CASE : List[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : Any = '''
from transformers import pipeline
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_env()
__SCREAMING_SNAKE_CASE : List[str] = '''1'''
__SCREAMING_SNAKE_CASE : List[Any] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
__SCREAMING_SNAKE_CASE : str = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = '''
from transformers import AutoModel
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
__SCREAMING_SNAKE_CASE : Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
__SCREAMING_SNAKE_CASE : str = self.get_env()
__SCREAMING_SNAKE_CASE : Dict = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__SCREAMING_SNAKE_CASE : str = '''1'''
__SCREAMING_SNAKE_CASE : Any = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 696 |
def equated_monthly_installments ( principal , rate_per_annum , years_to_repay ):
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''' )
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('''Years to repay must be an integer > 0''' )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
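    # Worked example: 2_500_000 principal at 8% p.a. over 10 years is roughly
    # 30_332 per month (rate_per_month = 0.08 / 12, number_of_payments = 120).
    print(f"{equated_monthly_installments(2_500_000, 0.08, 10):.2f}")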
| 696 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius ( num ):
    factors = prime_factors(num )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
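    # Spot checks: mobius(1) = 1, mobius(2) = -1, mobius(4) = 0 since 4 = 2 * 2
    # is not square-free.
    print(mobius(1), mobius(2), mobius(4))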
| 696 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'


def tpu_command_parser ( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher ( args ):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F'''accelerate=={args.accelerate_version}'''
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [F'''pip install {args.accelerate_version}''']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(F'''Running {' '.join(cmd )}''' )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def main ( ):
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
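# Hedged CLI sketch using the flags defined above; the TPU name and zone are
# placeholders:
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "pip install -U accelerate" --install_accelerate --debug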
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo_id = '''hf-internal-testing/test-two-configs'''
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo_id , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo_id )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Visual-Attention-Network/van-base': (
        'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
    ),
}
class VanConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''van'''

    def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-6 , layer_scale_init_value=1E-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
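# The _LazyModule indirection defers the heavy imports declared above until first
# attribute access, e.g. `transformers.models.llama.LlamaModel` only then triggers
# the real import of modeling_llama.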
| 696 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Optional[Any] =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class _lowercase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = XLMProphetNetTokenizer
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = True
def __magic_name__( self :List[str] ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE : Optional[Any] = XLMProphetNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : List[Any] = '''[PAD]'''
__SCREAMING_SNAKE_CASE : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(lowerCAmelCase__ ) , 1_012 )
def __magic_name__( self :List[Any] ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def __magic_name__( self :Tuple ) -> List[str]:
__SCREAMING_SNAKE_CASE : List[Any] = XLMProphetNetTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __magic_name__( self :Any ) -> Tuple:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = '''Hello World!'''
__SCREAMING_SNAKE_CASE : Any = [35_389, 6_672, 49, 2]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def __magic_name__( self :int ) -> Dict:
# fmt: off
__SCREAMING_SNAKE_CASE : List[str] = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 696 | 1 |
from __future__ import annotations
def _UpperCamelCase ( lowercase__ ):
if len(lowercase__ ) == 0:
return []
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = min(lowercase__ ), max(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = int(max_value - min_value ) + 1
__SCREAMING_SNAKE_CASE : list[list] = [[] for _ in range(lowercase__ )]
for i in my_list:
buckets[int(i - min_value )].append(lowercase__ )
return [v for bucket in buckets for v in sorted(lowercase__ )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
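    # Floats also sort correctly, since bucket indices are truncated to ints
    # (additional hypothetical check in the same style as the asserts above).
    assert bucket_sort([0.4, 1.2, 0.1, 0.3]) == [0.1, 0.3, 0.4, 1.2]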
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 1 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase : Any ={
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class _lowercase :
'''simple docstring'''
def __init__( self :Tuple , lowerCAmelCase__ :int = 14 ) -> None:
if group not in primes:
raise ValueError('''Unsupported Group''' )
__SCREAMING_SNAKE_CASE : int = primes[group]['''prime''']
__SCREAMING_SNAKE_CASE : Optional[Any] = primes[group]['''generator''']
__SCREAMING_SNAKE_CASE : int = int(hexlify(urandom(32 ) ) , base=16 )
def __magic_name__( self :Optional[int] ) -> str:
return hex(self.__private_key )[2:]
def __magic_name__( self :int ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = pow(self.generator , self.__private_key , self.prime )
return hex(lowerCAmelCase__ )[2:]
def __magic_name__( self :str , lowerCAmelCase__ :int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
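        # i.e. 2 <= key <= p - 2 and key**((p - 1) / 2) == 1 (mod p), so the key
        # lies in the prime-order subgroup of quadratic residues generated by g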
return (
2 <= key <= self.prime - 2
and pow(lowerCAmelCase__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :str ) -> str:
__SCREAMING_SNAKE_CASE : str = int(lowerCAmelCase__ , base=16 )
if not self.is_valid_public_key(lowerCAmelCase__ ):
raise ValueError('''Invalid public key''' )
__SCREAMING_SNAKE_CASE : str = pow(lowerCAmelCase__ , self.__private_key , self.prime )
return shaaaa(str(lowerCAmelCase__ ).encode() ).hexdigest()
@staticmethod
def __magic_name__( lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCAmelCase__ , (prime - 1) // 2 , lowerCAmelCase__ ) == 1
)
@staticmethod
def __magic_name__( lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :int = 14 ) -> str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(lowerCAmelCase__ , base=16 )
__SCREAMING_SNAKE_CASE : Dict = int(lowerCAmelCase__ , base=16 )
__SCREAMING_SNAKE_CASE : int = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''Invalid public key''' )
__SCREAMING_SNAKE_CASE : Tuple = pow(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return shaaaa(str(lowerCAmelCase__ ).encode() ).hexdigest()
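# Hedged usage sketch (not part of the original module): the exchange the class
# above performs, written against the module-level `primes` table and
# hashlib.sha256 in place of the anonymized hash import.
def _demo_key_exchange() -> None:
    from hashlib import sha256

    p, g = primes[14]["prime"], primes[14]["generator"]
    alice_secret = int(hexlify(urandom(32)), base=16)
    bob_secret = int(hexlify(urandom(32)), base=16)
    alice_public = pow(g, alice_secret, p)
    bob_public = pow(g, bob_secret, p)
    # Both sides derive the same shared value g**(a*b) mod p and hash it.
    assert pow(bob_public, alice_secret, p) == pow(alice_public, bob_secret, p)
    print(sha256(str(pow(bob_public, alice_secret, p)).encode()).hexdigest())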
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__lowerCAmelCase : int =None
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] ={'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : Dict ={
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__lowerCAmelCase : int ={
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : int = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE__ : Optional[int] = TaTokenizer
SCREAMING_SNAKE_CASE__ : List[int] = []
def __init__( self :str , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[Any]="</s>" , lowerCAmelCase__ :Any="<unk>" , lowerCAmelCase__ :Any="<pad>" , lowerCAmelCase__ :Dict=100 , lowerCAmelCase__ :Dict=None , **lowerCAmelCase__ :Optional[int] , ) -> Optional[int]:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__SCREAMING_SNAKE_CASE : str = [f'''<extra_id_{i}>''' for i in range(lowerCAmelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__SCREAMING_SNAKE_CASE : List[Any] = len(set(filter(lambda lowerCAmelCase__ : bool('''extra_id_''' in str(lowerCAmelCase__ ) ) , lowerCAmelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , extra_ids=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_file
__SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True
__SCREAMING_SNAKE_CASE : Tuple = extra_ids
@staticmethod
def __magic_name__( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] ) -> Optional[int]:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__SCREAMING_SNAKE_CASE : Optional[Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , lowerCAmelCase__ , )
return max_model_length
def __magic_name__( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : str = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__SCREAMING_SNAKE_CASE : List[Any] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : List[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __magic_name__( self :Tuple ) -> Optional[int]:
return list(
set(filter(lambda lowerCAmelCase__ : bool(re.search(r'''<extra_id_\d+>''' , lowerCAmelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __magic_name__( self :List[str] ) -> Dict:
return [self.convert_tokens_to_ids(lowerCAmelCase__ ) for token in self.get_sentinel_tokens()]
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
        __SCREAMING_SNAKE_CASE : List[str] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(1_0_0, 0.2_5) = }""")
print(f"""{price_plus_tax(1_2_5.5_0, 0.0_5) = }""")
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None
def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
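# Standalone sanity check of the cosine schedule built above (hypothetical
# demo re-deriving it without the anonymized helper): every beta lies in
# (0, 0.999] because alpha_bar decreases monotonically from ~1 toward 0.
def _cosine_alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

_demo_betas = [min(1 - _cosine_alpha_bar((i + 1) / 10) / _cosine_alpha_bar(i / 10), 0.999) for i in range(10)]
assert all(0.0 < b <= 0.999 for b in _demo_betas)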
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted DDIM, we look at the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index would be out of bounds.
        # `set_alpha_to_zero` decides whether we simply set this parameter to zero
        # (in which case self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
| 696 | 1 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = DownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Any = '''down'''
def __magic_name__( self :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[str] = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ResnetDownsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[Any] = '''down'''
def __magic_name__( self :str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = AttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''down'''
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = CrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[str] = '''down'''
def __magic_name__( self :Any ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Optional[Any] = 32
return init_dict, inputs_dict
def __magic_name__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[Any] = '''down'''
@property
def __magic_name__( self :Tuple ) -> Union[str, Any]:
return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ )
def __magic_name__( self :str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = super().prepare_init_args_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : List[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __magic_name__( self :List[str] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = SkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Tuple = '''down'''
@property
def __magic_name__( self :str ) -> Tuple:
return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = AttnSkipDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : str = '''down'''
@property
def __magic_name__( self :str ) -> Optional[int]:
return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Dict = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = DownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : str = '''down'''
@property
def __magic_name__( self :Any ) -> List[Any]:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __magic_name__( self :Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = AttnDownEncoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Tuple = '''down'''
@property
def __magic_name__( self :str ) -> Any:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = UNetMidBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Dict = '''mid'''
def __magic_name__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __magic_name__( self :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[str] = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = UNetMidBlockaDCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ : List[str] = '''mid'''
def __magic_name__( self :Optional[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = super().prepare_init_args_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Optional[int] = 32
return init_dict, inputs_dict
def __magic_name__( self :Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE : int = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ : List[str] = '''mid'''
@property
def __magic_name__( self :List[str] ) -> Optional[int]:
return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ )
def __magic_name__( self :int ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = super().prepare_init_args_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : List[str] = 32
return init_dict, inputs_dict
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = UpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : int = '''up'''
@property
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ResnetUpsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''up'''
@property
def __magic_name__( self :str ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[Any] = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = CrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[Any] = '''up'''
@property
def __magic_name__( self :Optional[Any] ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = super().prepare_init_args_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Tuple = 32
return init_dict, inputs_dict
def __magic_name__( self :Optional[int] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Dict = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = SimpleCrossAttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[str] = '''up'''
@property
def __magic_name__( self :Optional[int] ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ , include_encoder_hidden_states=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = super().prepare_init_args_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Dict = 32
return init_dict, inputs_dict
def __magic_name__( self :List[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : int = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = AttnUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''up'''
@property
def __magic_name__( self :Any ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __magic_name__( self :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = SkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Any = '''up'''
@property
def __magic_name__( self :str ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = AttnSkipUpBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : str = '''up'''
@property
def __magic_name__( self :Union[str, Any] ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : int = '''up'''
@property
def __magic_name__( self :List[str] ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = {'''in_channels''': 32, '''out_channels''': 32}
__SCREAMING_SNAKE_CASE : Any = self.dummy_input
return init_dict, inputs_dict
def __magic_name__( self :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(lowerCAmelCase__ )
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AttnUpDecoderBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : int = '''up'''
@property
def __magic_name__( self :List[str] ) -> str:
return super().get_dummy_input(include_temb=lowerCAmelCase__ )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Dict = {'''in_channels''': 32, '''out_channels''': 32}
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def __magic_name__( self :Any ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(lowerCAmelCase__ )
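# Note: every block test above follows the same recipe -- build the block from dummy
# init args, run a seeded dummy input through it, and compare a fixed 3x3 corner slice
# of the output against hard-coded values. A minimal self-contained sketch of that
# recipe (stand-in torch module rather than the real diffusers blocks, so it runs on
# its own; sizes are illustrative assumptions):
import torch
from torch import nn
def _check_output_slice_sketch(block: nn.Module, expected_slice: list, atol: float = 5e-3) -> None:
    torch.manual_seed(0)
    sample = torch.randn(4, 32, 32, 32)  # (batch, channels, height, width)
    with torch.no_grad():
        output = block(sample)
    # the tests above compare the last 3x3 corner of the last channel, flattened
    output_slice = output[0, -1, -3:, -3:].flatten()
    assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=atol)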
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
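# Usage sketch for the template above (assuming the restored field names): the
# column_mapping property tells datasets' task-preparation machinery how to rename
# dataset columns into the canonical "text"/"summary" schema.
def _summarization_template_demo() -> None:
    template = Summarization(text_column="article", summary_column="highlights")
    assert template.column_mapping == {"article": "text", "highlights": "summary"}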
| 696 | 1 |
def is_palindrome(num):
    return str(num) == str(num)[::-1]
def sum_reverse(num):
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        candidate = num
        while iterations < 50:
            candidate = sum_reverse(candidate)
            iterations += 1
            if is_palindrome(candidate):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
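# Quick sanity trace for the helpers above: 349 is a known non-Lychrel number that
# reaches a palindrome in three reverse-and-add steps (349 -> 1292 -> 4213 -> 7337).
def trace_reverse_and_add(num, max_iterations=50):
    """Sketch: return the reverse-and-add sequence up to the first palindrome."""
    sequence = [num]
    for _ in range(max_iterations):
        num = sum_reverse(num)
        sequence.append(num)
        if is_palindrome(num):
            break
    return sequence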
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 |
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
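# The recurrence above appears to generate the perimeters of "almost equilateral"
# triangles (sides a, a, a +/- 1) with integer area, the Project Euler 94 setup; a
# hedged cross-check of the first two generated perimeters via Heron's formula:
import math
def _has_integer_area(a, b, c):
    s2 = a + b + c  # twice the semi-perimeter
    sq16 = s2 * (s2 - 2 * a) * (s2 - 2 * b) * (s2 - 2 * c)  # equals 16 * area**2
    root = math.isqrt(sq16)
    return root * root == sq16 and root % 4 == 0
assert _has_integer_area(5, 5, 6)  # perimeter 16, area 12
assert _has_integer_area(17, 17, 16)  # perimeter 50, area 120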
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class _lowercase :
'''simple docstring'''
def __init__( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict=13 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :int=99 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=5 , lowerCAmelCase__ :Union[str, Any]=4 , lowerCAmelCase__ :List[str]=37 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Optional[Any]=2 , lowerCAmelCase__ :List[str]=0.02 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Tuple=None , ) -> int:
__SCREAMING_SNAKE_CASE : Dict = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : str = is_training
__SCREAMING_SNAKE_CASE : str = use_input_mask
__SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids
__SCREAMING_SNAKE_CASE : str = use_labels
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : Any = hidden_size
__SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Dict = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : str = type_vocab_size
__SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Tuple = num_labels
__SCREAMING_SNAKE_CASE : List[Any] = num_choices
__SCREAMING_SNAKE_CASE : int = scope
__SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size - 1
def __magic_name__( self :int ) -> Any:
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : str = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def __magic_name__( self :Optional[int] ) -> Dict:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE : str = True
return config, input_ids, input_mask, token_labels
def __magic_name__( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : str = GPTNeoXModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : str = GPTNeoXModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> str:
__SCREAMING_SNAKE_CASE : int = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
__SCREAMING_SNAKE_CASE : str = GPTNeoXForQuestionAnswering(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE : Any = self.num_labels
__SCREAMING_SNAKE_CASE : List[str] = GPTNeoXForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : int = self.num_labels
__SCREAMING_SNAKE_CASE : int = GPTNeoXForTokenClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[int] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : Dict = GPTNeoXForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = output_from_no_past['''hidden_states'''][0]
__SCREAMING_SNAKE_CASE : Optional[int] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )['''hidden_states'''][0]
# select random slice
__SCREAMING_SNAKE_CASE : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __magic_name__( self :List[str] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : List[str] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : str = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def __magic_name__( self :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = GPTNeoXModelTester(self )
__SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=64 , num_attention_heads=8 )
def __magic_name__( self :Any ) -> str:
self.config_tester.run_common_tests()
def __magic_name__( self :Optional[Any] ) -> int:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> List[Any]:
# This regression test was failing with PyTorch < 1.3
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
__SCREAMING_SNAKE_CASE : List[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def __magic_name__( self :Any ) -> int:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __magic_name__( self :Dict , lowerCAmelCase__ :Any ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Dict = ids_tensor([1, 10] , config.vocab_size )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : List[str] = GPTNeoXModel(lowerCAmelCase__ )
original_model.to(lowerCAmelCase__ )
original_model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = original_model(lowerCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE : Dict = original_model(lowerCAmelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : int = {'''type''': scaling_type, '''factor''': 10.0}
__SCREAMING_SNAKE_CASE : Dict = GPTNeoXModel(lowerCAmelCase__ )
scaled_model.to(lowerCAmelCase__ )
scaled_model.eval()
__SCREAMING_SNAKE_CASE : Tuple = scaled_model(lowerCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE : Any = scaled_model(lowerCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5 ) )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
__SCREAMING_SNAKE_CASE : List[Any] = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__SCREAMING_SNAKE_CASE : Dict = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
__SCREAMING_SNAKE_CASE : List[str] = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=20 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(lowerCAmelCase__ )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
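# Standalone sketch of the cache-consistency property that
# create_and_check_decoder_model_past_large_inputs exercises above: running only the
# last token with past_key_values must reproduce the full-sequence hidden states. The
# tiny config sizes here are illustrative assumptions, not the tester defaults.
def _kv_cache_roundtrip_sketch():
    config = GPTNeoXConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37,
    )
    model = GPTNeoXForCausalLM(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        full = model(input_ids, output_hidden_states=True)
        past = model(input_ids[:, :-1], use_cache=True).past_key_values
        step = model(input_ids[:, -1:], past_key_values=past, output_hidden_states=True)
    assert torch.allclose(full.hidden_states[-1][:, -1:], step.hidden_states[-1], atol=1e-4)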
| 696 |
def abbr(a: str, b: str) -> bool:
    """
    Return True if `b` can be obtained from `a` by capitalizing a subset of
    the lowercase letters of `a` and deleting the remaining lowercase letters.
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
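# Equivalent top-down formulation of the same recurrence (a sketch, not part of the
# original module), sometimes easier to read than the table:
from functools import lru_cache
def abbr_recursive(a: str, b: str) -> bool:
    @lru_cache(maxsize=None)
    def match(i: int, j: int) -> bool:
        if j == len(b):
            return all(c.islower() for c in a[i:])  # leftovers must be deletable
        if i == len(a):
            return False
        take = a[i].upper() == b[j] and match(i + 1, j + 1)
        skip = a[i].islower() and match(i + 1, j)
        return take or skip
    return match(0, 0)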
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''maskformer'''
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''mask_feature_size'''}
SCREAMING_SNAKE_CASE__ : Tuple = ['''resnet''', '''swin''']
SCREAMING_SNAKE_CASE__ : List[str] = ['''detr''']
def __init__( self :List[Any] , lowerCAmelCase__ :int = 256 , lowerCAmelCase__ :int = 256 , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[Dict] = None , lowerCAmelCase__ :Optional[Dict] = None , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :float = 1.0 , lowerCAmelCase__ :float = 1.0 , lowerCAmelCase__ :float = 1.0 , lowerCAmelCase__ :float = 20.0 , lowerCAmelCase__ :Optional[bool] = None , **lowerCAmelCase__ :Optional[Any] , ) -> int:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__SCREAMING_SNAKE_CASE : Any = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[str] = backbone_config.pop('''model_type''' )
__SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE : List[str] = config_class.from_dict(lowerCAmelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__SCREAMING_SNAKE_CASE : List[str] = DetrConfig()
else:
# verify that the decoder is supported
__SCREAMING_SNAKE_CASE : Dict = (
decoder_config.pop('''model_type''' ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[decoder_type]
__SCREAMING_SNAKE_CASE : int = config_class.from_dict(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = backbone_config
__SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_config
# main feature dimension for the model
__SCREAMING_SNAKE_CASE : Optional[Any] = fpn_feature_size
__SCREAMING_SNAKE_CASE : int = mask_feature_size
# initializer
__SCREAMING_SNAKE_CASE : List[str] = init_std
__SCREAMING_SNAKE_CASE : int = init_xavier_std
# Hungarian matcher && loss
__SCREAMING_SNAKE_CASE : Union[str, Any] = cross_entropy_weight
__SCREAMING_SNAKE_CASE : List[Any] = dice_weight
__SCREAMING_SNAKE_CASE : Optional[Any] = mask_weight
__SCREAMING_SNAKE_CASE : str = use_auxiliary_loss
__SCREAMING_SNAKE_CASE : Dict = no_object_weight
__SCREAMING_SNAKE_CASE : Tuple = output_auxiliary_logits
__SCREAMING_SNAKE_CASE : Any = self.decoder_config.encoder_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = self.decoder_config.num_hidden_layers
super().__init__(**lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] , lowerCAmelCase__ :PretrainedConfig , lowerCAmelCase__ :PretrainedConfig , **lowerCAmelCase__ :Any ) -> Tuple:
return cls(
backbone_config=lowerCAmelCase__ , decoder_config=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __magic_name__( self :Optional[int] ) -> Dict[str, any]:
__SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE : List[str] = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.decoder_config.to_dict()
__SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
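# Usage sketch: upstream this config class is MaskFormerConfig and the classmethod
# above is `from_backbone_and_decoder_configs`; assuming those names, a config can be
# composed from explicit backbone/decoder configs instead of the Swin/DETR defaults:
def _compose_maskformer_config_sketch():
    from transformers import DetrConfig, MaskFormerConfig, SwinConfig
    backbone = SwinConfig(image_size=384, window_size=12, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    decoder = DetrConfig()
    return MaskFormerConfig.from_backbone_and_decoder_configs(
        backbone_config=backbone, decoder_config=decoder
    )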
| 696 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but also returning the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int:
if return_pvalue:
            results = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
| 696 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Up-case the plaintext and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
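# Round-trip sketch: decode inverts encode for any key, up to the X padding that
# prepare_input inserts between doubled letters and at odd length, so compare against
# the prepared plaintext rather than the raw message.
if __name__ == "__main__":
    _message = "Hide the gold in the tree stump"  # the classic Playfair example
    _key = "playfair example"
    assert decode(encode(_message, _key), _key) == prepare_input(_message)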
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 5_1_2}
PRETRAINED_INIT_CONFIGURATION = {}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
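# Layout sketch for the two helpers above (upstream names:
# build_inputs_with_special_tokens / create_token_type_ids_from_sequences): single
# sequences become [CLS] A [SEP] with all-zero token type ids, pairs become
# [CLS] A [SEP] B [SEP] with ones over the second segment. Plain-int illustration
# (hypothetical ids, no tokenizer download needed):
def _special_tokens_layout_sketch(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id], [0] * (len(ids_a) + 2)
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids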
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
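# The pattern above defers the heavy framework imports until first attribute access:
# only `_import_structure` is built eagerly, and `_LazyModule` replaces this module in
# sys.modules. A minimal standalone sketch of the same idea (not transformers' actual
# implementation):
import importlib
import types
class _LazyNamespaceSketch(types.ModuleType):
    def __init__(self, name, structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in structure.items() for attr in attrs}
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(submodule, attr)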
| 696 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
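# Small fixed-grid smoke test for largest_product (no grid.txt needed): the only
# large product in this 4x4 grid is the left-to-right diagonal 2 * 3 * 4 * 5 = 120.
def _largest_product_smoke_test():
    grid = [
        [2, 1, 1, 1],
        [1, 3, 1, 1],
        [1, 1, 4, 1],
        [1, 1, 1, 5],
    ]
    assert largest_product(grid) == 120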
if __name__ == "__main__":
print(solution())
| 696 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :str ) -> Any:
__SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE : str = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : List[str] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''unk_token''': '''<unk>'''}
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Any = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Any , **lowerCAmelCase__ :int ) -> Tuple:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowerCAmelCase__ )
def __magic_name__( self :List[str] , **lowerCAmelCase__ :str ) -> Optional[int]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **lowerCAmelCase__ )
def __magic_name__( self :Dict , **lowerCAmelCase__ :List[Any] ) -> List[str]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Optional[int] ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def __magic_name__( self :List[Any] ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : Optional[Any] = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__( self :Any ) -> int:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Tuple = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> int:
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : str = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : int = image_processor(lowerCAmelCase__ , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE : Tuple = processor(images=lowerCAmelCase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__( self :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = '''lower newer'''
__SCREAMING_SNAKE_CASE : Tuple = processor(text=lowerCAmelCase__ , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE : int = tokenizer(lowerCAmelCase__ , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __magic_name__( self :Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[int] = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = '''lower newer'''
__SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : str = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[str] = '''google/owlvit-base-patch32'''
__SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''cat''', '''nasa badge''']
__SCREAMING_SNAKE_CASE : Optional[int] = processor(text=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[Any] = '''google/owlvit-base-patch32'''
__SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = [['''cat''', '''nasa badge'''], ['''person''']]
__SCREAMING_SNAKE_CASE : int = processor(text=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 16
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = max([len(lowerCAmelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''google/owlvit-base-patch32'''
__SCREAMING_SNAKE_CASE : str = OwlViTProcessor.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = ['''cat''', '''nasa badge''']
__SCREAMING_SNAKE_CASE : Optional[int] = processor(text=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = 16
__SCREAMING_SNAKE_CASE : Any = inputs['''input_ids''']
__SCREAMING_SNAKE_CASE : Optional[Any] = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __magic_name__( self :List[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Optional[Any] = processor(images=lowerCAmelCase__ , query_images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :Optional[int] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Dict = OwlViTProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : Tuple = processor.batch_decode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
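# End-to-end usage sketch for the processor exercised above (hedged: downloads
# pretrained weights at call time; class names are the upstream transformers ones):
def _owlvit_detection_sketch():
    import torch
    from PIL import Image
    from transformers import OwlViTForObjectDetection, OwlViTProcessor
    processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
    model = OwlViTForObjectDetection.from_pretrained('google/owlvit-base-patch32')
    image = Image.new('RGB', (640, 480))
    inputs = processor(text=[['a cat', 'a nasa badge']], images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    return processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)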
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self ):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
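# --- Added usage sketch (assumption-labeled, not part of the original test
# file): classifying one image with the same public checkpoint the
# integration test above exercises. ---
# image_processor = MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' )
# model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' )
# inputs = image_processor(images=prepare_img() , return_tensors='''pt''' )
# print(model(**inputs ).logits.argmax(-1 ).item() )  # ImageNet class id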
| 696 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self ):
        return 32
    @property
    def time_input_dim(self ):
        return 32
    @property
    def block_out_channels_a(self ):
        return self.time_input_dim
    @property
    def time_embed_dim(self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self ):
        return 100
@property
    def dummy_unet(self ):
        torch.manual_seed(0 )
        model_kwargs = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs(self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components(self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='''epsilon''' , thresholding=False , )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_inpaint(self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f'''image.shape {image.shape}''' )
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        mask = np.ones((768, 768) , dtype=np.float32 )
        mask[:250, 250:-250] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
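# --- Added usage sketch (hypothetical, mirroring the slow test above;
# assumes a CUDA device and the public Kandinsky 2.2 checkpoints) ---
# prior = KandinskyVaaPriorPipeline.from_pretrained('''kandinsky-community/kandinsky-2-2-prior''' ).to('''cuda''' )
# decoder = KandinskyVaaInpaintPipeline.from_pretrained('''kandinsky-community/kandinsky-2-2-decoder-inpaint''' ).to('''cuda''' )
# emb , neg_emb = prior('''a hat''' ).to_tuple()
# hat_image = decoder(image=init_image , mask_image=mask , image_embeds=emb ,
#                     negative_image_embeds=neg_emb , height=768 , width=768 ).images[0]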
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main ( ):
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 696 | 1 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
        outputs = unmasker('''My name is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-0_5, '''token''': 38_015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-0_5, '''token''': 25_506, '''token_str''': ''' accuser'''},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-0_5,
'''token''': 38_015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-0_5,
'''token''': 25_506,
'''token_str''': ''' accuser''',
},
] , )
__SCREAMING_SNAKE_CASE : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-0_5, '''token''': 13_606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-0_5, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-0_5, '''token''': 2_941, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_small_model_pt(self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
        outputs = unmasker('''My name is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-0_5, '''token''': 35_676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-0_5, '''token''': 16_416, '''token_str''': '''ELS'''},
] , )
__SCREAMING_SNAKE_CASE : Optional[Any] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-0_5,
'''token''': 35_676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-0_5, '''token''': 16_416, '''token_str''': '''ELS'''},
] , )
__SCREAMING_SNAKE_CASE : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-0_5, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-0_5, '''token''': 2_941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-0_5, '''token''': 13_606, '''token_str''': ''' Clara'''},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=6 ) , [
[
{
'''score''': 2.2E-0_5,
'''token''': 35_676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-0_5, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-0_5,
'''token''': 35_676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-0_5, '''token''': 16_416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self ):
        pipe = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
        # convert model to fp16
        pipe.model.half()
        outputs = pipe('''Paris is the [MASK] of France.''' )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(outputs , list )
    @slow
    @require_torch
    def test_large_model_pt(self ):
        unmasker = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
        self.run_large_test(unmasker )
    @slow
    @require_tf
    def test_large_model_tf(self ):
        unmasker = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
        self.run_large_test(unmasker )
    def run_large_test(self , unmasker ):
        outputs = unmasker('''My name is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs ) , [
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1_573, '''token_str''': ''' Chris'''},
] , )
__SCREAMING_SNAKE_CASE : Optional[int] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 2_201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 12_790,
'''token_str''': ''' Lyon''',
},
] , )
__SCREAMING_SNAKE_CASE : Optional[Any] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3_499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 13_606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2_941, '''token_str''': ''' Te'''},
] , )
    @require_torch
    def test_model_no_pad_pt(self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    @require_tf
    def test_model_no_pad_tf(self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    def get_test_pipeline(self , model , tokenizer , processor ):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
    def run_pipeline_test(self , fill_masker , examples ):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f'''This is a {tokenizer.mask_token}''' , )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        outputs = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        outputs = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
        self.assertEqual(
            outputs , [
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
            ] , )
        with self.assertRaises(ValueError ):
            fill_masker([None] )
        # No mask_token is not supported
        with self.assertRaises(PipelineException ):
            fill_masker('''This is''' )
        self.run_test_top_k(model , tokenizer )
        self.run_test_targets(model , tokenizer )
        self.run_test_top_k_targets(model , tokenizer )
        self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
        self.fill_mask_with_multiple_masks(model , tokenizer )
    def run_test_targets(self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs} , set(processed_targets ) )
        # Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=targets )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs} , set(processed_targets ) )
        # Score equivalence
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=targets )
        tokens = [top_mask['''token_str'''] for top_mask in outputs]
        scores = [top_mask['''score'''] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens ) == set(targets ):
            unmasked_targets = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=tokens )
            target_scores = [top_mask['''score'''] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )
        # Raises with invalid
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError ):
                outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets='''''' )
    def run_test_top_k(self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs2 = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
        self.assertEqual(
            outputs2 , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def run_test_top_k_targets(self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=targets )
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el['''token_str'''] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2 ).issubset(vocab ):
            unmasked_targets = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=targets2 )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(unmasked_targets ) )
    def fill_mask_with_duplicate_targets_and_top_k(self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=targets , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks(self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
            ] , )
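# --- Added usage sketch (minimal; the `pipeline` factory is the public
# entry point exercised by the tests above) ---
# from transformers import pipeline
# unmasker = pipeline('''fill-mask''' , model='''distilroberta-base''' )
# print(unmasker('''The largest city in France is <mask>''' , top_k=2 ) )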
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
    model_type = '''canine'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=16_384 , type_vocab_size=16 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , bos_token_id=0xe000 , eos_token_id=0xe001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=16_384 , local_transformer_stride=128 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
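# --- Added usage sketch (assumes this class is exposed as `CanineConfig`):
# every constructor argument above is an optional keyword override of the
# defaults. ---
# config = CanineConfig(num_hidden_layers=6 , hidden_size=512 )
# print(config.downsampling_rate )  # 4 by default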
| 696 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Optional[Any] , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :Optional[Any] ) -> None:
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
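# Migration sketch (added; the deprecation warning above names the
# replacement class):
# from transformers import DonutImageProcessor
# image_processor = DonutImageProcessor.from_pretrained('''naver-clova-ix/donut-base''' )  # example checkpoint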
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=267_735 , cutoffs=[20_000, 40_000, 200_000] , d_model=1_024 , d_embed=1_024 , n_head=16 , d_head=64 , d_inner=4_096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1_600 , clamp_len=1_000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings(self ):
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings(self , value ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 696 | 1 |
def compute_ap(l ):  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
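# Expected output (added note): for the adjacency list above, removing
# vertex 2 disconnects {3, 4} and {5, 6, 7, 8} from {0, 1}, removing 3
# disconnects 4, and removing 5 disconnects {6, 7, 8}; the script should
# therefore print the articulation points 2, 3 and 5.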
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
    model_type = '''megatron-bert'''
    def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
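# --- Added usage sketch (assumes the class is exposed as
# `MegatronBertConfig`) ---
# config = MegatronBertConfig(num_hidden_layers=12 , hidden_size=768 )
# print(config.num_attention_heads )  # 16 by default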
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
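# Design note (added): registering a _LazyModule in sys.modules defers the
# heavy torch / vision imports until an attribute such as BridgeTowerModel
# is first accessed, keeping `import transformers` fast even when the
# optional backends are installed.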
| 696 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def test_find_backend(self ):
        no_backend = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
        self.assertIsNone(no_backend )
        simple_backend = find_backend(''' if not is_tokenizers_available():''' )
        self.assertEqual(simple_backend , '''tokenizers''' )
        backend_with_underscore = find_backend(''' if not is_tensorflow_text_available():''' )
        self.assertEqual(backend_with_underscore , '''tensorflow_text''' )
        double_backend = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
        self.assertEqual(double_backend , '''sentencepiece_and_tokenizers''' )
        double_backend_with_underscore = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
        self.assertEqual(double_backend_with_underscore , '''sentencepiece_and_tensorflow_text''' )
        triple_backend = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
        self.assertEqual(triple_backend , '''sentencepiece_and_tokenizers_and_vision''' )
    def test_read_init(self ):
        objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCAmelCase__ )
self.assertIn('''tensorflow_text''' , lowerCAmelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
    def test_create_dummy_object(self ):
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )
        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files(self ):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
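# For context (added, hedged): at runtime a dummy class generated from the
# template above raises as soon as it is instantiated, via `requires_backends`,
# with a message roughly of the form "FakeClass requires the PyTorch library
# but it was not found in your environment." That is why these tests only need
# to compare the generated source text.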
| 696 | 1 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
    model_input_names = ['''input_features''', '''attention_mask''']
    def __init__( self , feature_size=80 , sampling_rate=16_000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=3_2768.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self , one_waveform: np.array ) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel='''log''' , )
        return msfc_features.T
    def _normalize_one(self , x , input_length , padding_value ):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize(self , input_features , attention_mask = None ):
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
def __call__( self :Optional[int] , lowerCAmelCase__ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :Optional[int] = None , **lowerCAmelCase__ :int , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__SCREAMING_SNAKE_CASE : Any = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [raw_speech]
# extract fbank features
__SCREAMING_SNAKE_CASE : str = [self._extract_mfsc_features(lowerCAmelCase__ ) for one_waveform in raw_speech]
# convert into correct format for padding
__SCREAMING_SNAKE_CASE : Any = BatchFeature({'''input_features''': features} )
__SCREAMING_SNAKE_CASE : Optional[int] = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
# make sure list is in array format
__SCREAMING_SNAKE_CASE : str = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_features]
__SCREAMING_SNAKE_CASE : Any = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : str = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__SCREAMING_SNAKE_CASE : Tuple = (
np.array(lowerCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__SCREAMING_SNAKE_CASE : Dict = self.normalize(
padded_inputs['''input_features'''] , attention_mask=lowerCAmelCase__ )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : Dict = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
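# --- Editor-added sketch (not part of the original dump). The branch above
# implements standard per-utterance mean/variance (CMVN-style) normalization:
# for a feature matrix x of shape (frames, dims) and valid length n, the
# de-mangled core is x[:n] = (x[:n] - x[:n].mean(axis=0)) / x[:n].std(axis=0).
# Minimal self-contained demonstration (names here are illustrative only):
import numpy as np

feats = np.random.randn(5, 3).astype(np.float32) * 4.0 + 2.0
normalized = (feats - feats.mean(axis=0)) / feats.std(axis=0)
assert np.allclose(normalized.mean(axis=0), 0.0, atol=1e-4)
assert np.allclose(normalized.std(axis=0), 1.0, atol=1e-4)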
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase ( lowercase__ ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(lowercase__ , 0 , lowercase__ , args=(lowercase__) )[0]
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return math.pow(lowercase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
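# --- Editor-added sketch (not part of the original dump). The obfuscation
# above renames both functions to `_UpperCamelCase` and every argument to
# `lowercase__`, so the quad(...) call no longer resolves to the integrand.
# A de-mangled, runnable version of the same gamma computation:
import math
from numpy import inf
from scipy.integrate import quad

def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)

def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    # Gamma(z) = integral from 0 to inf of x**(z-1) * e**(-x) dx
    return quad(integrand, 0, inf, args=(num,))[0]

assert abs(gamma(5.0) - 24.0) < 1e-6  # Gamma(n) = (n-1)! for integer n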
| 696 | 1 |
from datetime import datetime as dt
import os
from github import Github
__lowerCAmelCase : Dict =[
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[int] = Github(os.environ['''GITHUB_TOKEN'''] )
__SCREAMING_SNAKE_CASE : Dict = g.get_repo('''huggingface/transformers''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
        __SCREAMING_SNAKE_CASE : List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
__SCREAMING_SNAKE_CASE : Union[str, Any] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
__SCREAMING_SNAKE_CASE : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__SCREAMING_SNAKE_CASE : Union[str, Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
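# --- Editor-added worked example (not part of the original dump). For a
# principal of 100000 at 10% per annum over 10 years (values approximate):
#     rate_per_month     = 0.10 / 12 ~= 0.0083333
#     number_of_payments = 10 * 12   = 120
#     emi = 100000 * 0.0083333 * (1.0083333 ** 120) / ((1.0083333 ** 120) - 1)
#         ~= 1321.5 per month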
| 696 | 1 |
from math import factorial
def _UpperCamelCase ( lowercase__ = 100 ):
return sum(map(lowercase__ , str(factorial(lowercase__ ) ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
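# --- Editor-added worked example (not part of the original dump). This is
# the digit sum of n! (Project Euler problem 20 asks for n = 100). For
# n = 10: factorial(10) = 3628800 and 3+6+2+8+8+0+0 = 27, so solution(10)
# returns 27.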
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
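# --- Editor-added worked examples (not part of the original dump). The
# function above computes the Moebius function mu(n):
#     mu(4)  = 0    (4 = 2*2 is not square-free)
#     mu(6)  = 1    (6 = 2*3, an even number of prime factors)
#     mu(30) = -1   (30 = 2*3*5, an odd number of prime factors)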
| 696 | 1 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :Optional[Any] ) -> str:
super().__init__()
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(3 , 4 )
__SCREAMING_SNAKE_CASE : str = nn.BatchNormad(4 )
__SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(4 , 5 )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Tuple ) -> Any:
return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase__ ) ) )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase__ , model.state_dict() )
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(lowerCAmelCase__ , '''index.json''' )
self.assertTrue(os.path.isfile(lowerCAmelCase__ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(lowerCAmelCase__ , f'''{key}.dat''' )
self.assertTrue(os.path.isfile(lowerCAmelCase__ ) )
# TODO: add tests on the fact weights are properly loaded
def __magic_name__( self :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
__SCREAMING_SNAKE_CASE : str = torch.randn(2 , 3 , dtype=lowerCAmelCase__ )
with TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : Tuple = offload_weight(lowerCAmelCase__ , '''weight''' , lowerCAmelCase__ , {} )
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(lowerCAmelCase__ , '''weight.dat''' )
self.assertTrue(os.path.isfile(lowerCAmelCase__ ) )
self.assertDictEqual(lowerCAmelCase__ , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(lowerCAmelCase__ ).split('''.''' )[1]}} )
__SCREAMING_SNAKE_CASE : List[Any] = load_offloaded_weight(lowerCAmelCase__ , index['''weight'''] )
self.assertTrue(torch.equal(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = ModelForTest()
__SCREAMING_SNAKE_CASE : int = model.state_dict()
__SCREAMING_SNAKE_CASE : List[Any] = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
__SCREAMING_SNAKE_CASE : Optional[int] = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key] ) )
__SCREAMING_SNAKE_CASE : Dict = {k: v for k, v in state_dict.items() if '''weight''' in k}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
# Duplicates are removed
__SCREAMING_SNAKE_CASE : Optional[int] = OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(lowerCAmelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key] ) )
def __magic_name__( self :str ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
__SCREAMING_SNAKE_CASE : Union[str, Any] = extract_submodules_state_dict(lowerCAmelCase__ , ['''a.1''', '''a.2'''] )
self.assertDictEqual(lowerCAmelCase__ , {'''a.1''': 0, '''a.2''': 2} )
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
__SCREAMING_SNAKE_CASE : List[str] = extract_submodules_state_dict(lowerCAmelCase__ , ['''a.1''', '''a.2'''] )
self.assertDictEqual(lowerCAmelCase__ , {'''a.1.a''': 0, '''a.2.a''': 2} )
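# --- Editor-added usage sketch (not part of the original dump), kept to what
# the tests above actually exercise: offload_state_dict writes one .dat file
# per tensor plus an index.json of {name: {"shape": [...], "dtype": "float32"}},
# and OffloadedWeightsLoader serves the tensors back by key.
#
#     from tempfile import TemporaryDirectory
#     from accelerate.utils import OffloadedWeightsLoader, offload_state_dict
#
#     model = ModelForTest()
#     with TemporaryDirectory() as tmp_dir:
#         offload_state_dict(tmp_dir, model.state_dict())
#         weight_map = OffloadedWeightsLoader(state_dict={}, save_folder=tmp_dir)
#         first_weight = weight_map["linear1.weight"]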
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
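# --- Editor-added usage sketch (not part of the original dump). The
# update_from_string test above relies on a comma-separated "key=value"
# format; de-mangled (GPTaConfig in this dump is GPT2Config), it amounts to:
#
#     from transformers import GPT2Config
#     c = GPT2Config()
#     c.update_from_string("n_embd=10,resid_pdrop=0.2")
#     assert c.n_embd == 10 and c.resid_pdrop == 0.2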
| 696 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
SCREAMING_SNAKE_CASE__ : List[str] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
@property
def __magic_name__( self :List[str] ) -> Optional[Any]:
return 32
@property
def __magic_name__( self :Any ) -> Any:
return 32
@property
def __magic_name__( self :int ) -> str:
return self.time_input_dim
@property
def __magic_name__( self :Optional[int] ) -> str:
return self.time_input_dim * 4
@property
def __magic_name__( self :Dict ) -> Dict:
return 100
@property
def __magic_name__( self :Dict ) -> Dict:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def __magic_name__( self :int ) -> Optional[int]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __magic_name__( self :List[Any] ) -> str:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : int = self.dummy_unet
__SCREAMING_SNAKE_CASE : Any = self.dummy_movq
__SCREAMING_SNAKE_CASE : List[Any] = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int]=0 ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCAmelCase__ )
# create init_image
__SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
__SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : int = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__( self :Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''cpu'''
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.pipeline_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Dict = output.images
__SCREAMING_SNAKE_CASE : int = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Dict ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
__SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__SCREAMING_SNAKE_CASE : Dict = init_image.resize((512, 512) )
__SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(lowerCAmelCase__ ) ).float() / 255.0
__SCREAMING_SNAKE_CASE : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''A robot, 4k photo'''
__SCREAMING_SNAKE_CASE : Dict = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : int = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = pipe_prior(
lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.85 , generator=lowerCAmelCase__ , negative_prompt='''''' , ).to_tuple()
__SCREAMING_SNAKE_CASE : Dict = pipeline(
image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , hint=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
__SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 1 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ='T5Config'
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros_like(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
__SCREAMING_SNAKE_CASE : List[str] = shifted_input_ids.at[:, 0].set(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = jnp.where(shifted_input_ids == -100 , lowercase__ , lowercase__ )
return shifted_input_ids
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''mt5'''
SCREAMING_SNAKE_CASE__ : Tuple = MTaConfig
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''mt5'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = MTaConfig
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''mt5'''
SCREAMING_SNAKE_CASE__ : int = MTaConfig
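# --- Editor-added worked example (not part of the original dump). The helper
# above is the usual "shift right" that builds decoder inputs for T5-style
# models; -100 label placeholders are replaced by pad_token_id. A minimal
# check assuming pad_token_id = 0 and decoder_start_token_id = 0:
import jax.numpy as jnp

ids = jnp.array([[5, -100, 7, 8]])
shifted = jnp.zeros_like(ids).at[:, 1:].set(ids[:, :-1]).at[:, 0].set(0)
shifted = jnp.where(shifted == -100, 0, shifted)
assert shifted.tolist() == [[0, 5, 0, 7]]  # start token, then inputs shifted right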
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
        # This tells us after how many encoder layers we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
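# --- Editor-added worked example (not part of the original dump). The
# sparse-step arithmetic above just spaces the expert (sparse) layers
# evenly. With num_layers = 12 and num_sparse_encoder_layers = 3:
#
#     encoder_sparse_step = 12 // 3 = 4
#
# i.e. one in every four encoder blocks is a sparse MoE block; the exact
# placement is decided by the modeling code, not by this config class.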
| 696 | 1 |
from math import isqrt
def _UpperCamelCase ( lowercase__ ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowercase__ ) + 1 ) )
def _UpperCamelCase ( lowercase__ = 10**6 ):
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : Dict = 1
__SCREAMING_SNAKE_CASE : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowercase__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
__lowerCAmelCase : List[str] ='\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'
__lowerCAmelCase : Dict ='\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results[\'matthews_correlation\'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric("matthews_correlation")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results[\'matthews_correlation\'], 2))\n -0.25\n'
__lowerCAmelCase : Optional[Any] ='\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :str ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def __magic_name__( self :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None ) -> int:
return {
"matthews_correlation": float(matthews_corrcoef(lowerCAmelCase__ , lowerCAmelCase__ , sample_weight=lowerCAmelCase__ ) ),
}
| 696 |
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |
__lowerCAmelCase : str ={
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = set()
# keep track of all the paths to be checked
__SCREAMING_SNAKE_CASE : Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__SCREAMING_SNAKE_CASE : List[Any] = queue.pop(0 )
# get the last node from the path
__SCREAMING_SNAKE_CASE : Optional[int] = path[-1]
if node not in explored:
__SCREAMING_SNAKE_CASE : Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__SCREAMING_SNAKE_CASE : int = list(lowercase__ )
new_path.append(lowercase__ )
queue.append(lowercase__ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowercase__ )
# in case there's no path between the 2 nodes
return []
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__SCREAMING_SNAKE_CASE : int = [start]
__SCREAMING_SNAKE_CASE : Dict = set(lowercase__ )
# Keep tab on distances from `start` node.
__SCREAMING_SNAKE_CASE : Union[str, Any] = {start: 0, target: -1}
while queue:
__SCREAMING_SNAKE_CASE : str = queue.pop(0 )
if node == target:
__SCREAMING_SNAKE_CASE : List[str] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowercase__ )
queue.append(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_pix2pix_intermediate_state(self ):
        number_of_steps = 0

        def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self ):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs['''image'''] = inputs['''image'''].resize((504, 504) )
        model_id = '''timbrooks/instruct-pix2pix'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        output = pipe(**inputs )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name ):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
else:
raise ValueError('''Model not supported''' )
    repo_id = '''huggingface/label-files'''
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = '''speech-commands-v2-id2label.json'''
    else:
        config.num_labels = 527
        filename = '''audioset-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key(name ):
    if "module.v" in name:
        name = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "dist_token" in name:
        name = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
    if "module.mlp_head.1" in name:
        name = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
    return name
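# Illustrative example of the renaming above (added; the exact key comes from the
# original AST checkpoint layout): "module.v.blocks.0.attn.proj.weight" becomes
# "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight".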
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            # target prefix of the HF attention module for this layer
            prefix = f'''audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention'''
            if "weight" in key:
                orig_state_dict[f'''{prefix}.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''{prefix}.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''{prefix}.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''{prefix}.query.bias'''] = val[:dim]
                orig_state_dict[f'''{prefix}.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''{prefix}.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
return orig_state_dict
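# Shape note (added for clarity): the original checkpoint stores one fused qkv
# projection of shape (3 * hidden_size, hidden_size) per layer; the split above
# carves it into equal query/key/value blocks of (hidden_size, hidden_size) each.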
def remove_keys(state_dict ):
    ignore_keys = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978
    std = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526
    max_length = 1024 if '''speech-commands''' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
if "speech-commands" in model_name:
        dataset = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
        waveform = dataset[0]['''audio''']['''array''']
    else:
        filepath = hf_hub_download(
            repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
        waveform, _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=16000 , return_tensors='''pt''' )
# forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__SCREAMING_SNAKE_CASE : Any = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , lowercase__ , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase : Any =parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput ):
    '''simple docstring'''

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
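# Illustrative sanity check (added; not part of the original module, and never
# called at import time). Because alpha_bar(t) decreases monotonically on [0, 1]
# for both transform types, every beta produced above is positive and capped at
# max_beta:
def _demo_betas_for_alpha_bar() -> None:
    betas = betas_for_alpha_bar(10 )
    assert betas.shape == (10,)
    assert bool((betas > 0).all() ) and bool((betas <= 0.999).all() )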
class DDIMInverseScheduler(SchedulerMixin , ConfigMixin ):
    '''simple docstring'''

    order = 1
@register_to_config
    def __init__( self , num_train_timesteps: int = 1_000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , clip_sample: bool = True , set_alpha_to_zero: bool = True , steps_offset: int = 0 , prediction_type: str = "epsilon" , clip_sample_range: float = 1.0 , **kwargs , ):
        if kwargs.get('''set_alpha_to_one''' , None ) is not None:
            deprecation_message = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            set_alpha_to_zero = kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ) -> torch.FloatTensor:
        return sample

    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ) -> None:
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
self.timesteps += self.config.steps_offset
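        # Worked example (added): with num_train_timesteps=1000 and
        # num_inference_steps=50, step_ratio is 20, so the (ascending, inverse)
        # timestep grid is [0, 20, 40, ..., 980], shifted by `steps_offset` above.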
    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , eta: float = 0.0 , use_clipped_model_output: bool = False , variance_noise: Optional[torch.FloatTensor] = None , return_dict: bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction`''' )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
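    # In equation form (added note; eq. (12) of https://arxiv.org/pdf/2010.02502.pdf
    # with eta = 0, run in the inverted t -> t+1 direction):
    #     x_{t+1} = sqrt(alpha_bar_{t+1}) * x0_pred + sqrt(1 - alpha_bar_{t+1}) * eps_pred
    # which is exactly the sum of steps 5 and 6 computed in `step` above.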
    def __len__(self ) -> int:
return self.config.num_train_timesteps
| 696 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']


def create_inputs(input_types ):
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def output_types(outputs ):
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append('''text''' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append('''image''' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append('''audio''' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class ToolTesterMixin:
'''simple docstring'''
    def test_inputs_outputs(self ):
        self.assertTrue(hasattr(self.tool , '''inputs''' ) )
        self.assertTrue(hasattr(self.tool , '''outputs''' ) )
        inputs = self.tool.inputs
for _input in inputs:
            if isinstance(_input , list ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
    def test_call(self ):
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
    def test_common_attributes(self ):
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
    def test_agent_types_outputs(self ):
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_types_inputs(self ):
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class Summarization(TaskTemplate ):
    '''simple docstring'''

    task: str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 696 | 1 |
def miller_rabin(n: int , allow_probable: bool = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
    bounds = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
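    # Worked example (added): for n = 221, n - 1 = 220 = 55 * 2**2,
    # so the loop above leaves d = 55 and s = 2.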
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 696 |
def solution(max_perimeter: int = 10**9 ) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
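# Note (added): the loop iterates a Pell-like recurrence that enumerates the
# "almost equilateral" triangles (sides a, a, a +/- 1) with integral area; the
# first perimeters it generates are 16, 50, 196, 722, ... (Project Euler 94).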
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
def solution(limit: int = 1000000 ) -> int:
    phi = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
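# Worked example (added): sum(phi[2 : limit + 1]) counts the reduced proper
# fractions n/d with d <= limit. For limit = 8 the totient sieve above yields
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21, matching the example
# given in Project Euler problem 72.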
if __name__ == "__main__":
print(solution())
| 696 |
def abbr(a: str , b: str ) -> bool:
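    """
    Determine if `b` is an abbreviation of `a`: capitalize some subset of the
    lowercase letters of `a` and delete the remaining lowercase letters.

    Doctests added so the `doctest.testmod()` call below has something to run:
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """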
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
if a[i].islower():
                    dp[i + 1][j] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = '''switch_transformers'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=32_128 , d_model=768 , d_kv=64 , d_ff=2_048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us how often (i.e. every how many layers) the encoder places a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us how often (i.e. every how many layers) the decoder places a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
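        # Example (added): with num_layers = 12 and num_sparse_encoder_layers = 3,
        # encoder_sparse_step is 4, i.e. every fourth encoder block is a sparse
        # (mixture-of-experts) layer; the same logic applies to the decoder.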
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
| 696 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric ):
    '''simple docstring'''

    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 696 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        controlnet = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
    def test_attention_slicing_forward_pass(self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )

    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests(PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )

        controlnet1 = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
    def test_control_guidance_switch(self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
    def test_attention_slicing_forward_pass(self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )

    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
    def test_save_pretrained_raise_not_implemented_exception(self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase ):
    '''simple docstring'''

    def tearDown(self ) -> None:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny(self ):
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 5_1_2}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token='''[UNK]''' , sep_token='''[SEP]''' , pad_token='''[PAD]''' , cls_token='''[CLS]''' , mask_token='''[MASK]''' , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
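    # Example (added for clarity): for a sequence pair (A, B) the layout is
    #     [CLS] A [SEP] B [SEP]
    # and the token type ids are 0 for everything up to and including the first
    # [SEP], then 1 for the second segment and its closing [SEP].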
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 696 | 1 |
from __future__ import annotations
import math
def minimax(depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , lowercase__ , lowercase__ , lowercase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowercase__ , lowercase__ , lowercase__ ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , lowercase__ , lowercase__ , lowercase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowercase__ , lowercase__ , lowercase__ ) , )
)
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Any = [90, 23, 6, 33, 21, 65, 123, 34423]
__SCREAMING_SNAKE_CASE : Dict = math.log(len(lowercase__ ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , lowercase__ , lowercase__ , lowercase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
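# For the sample scores above the program prints "Optimal value : 65": the
# depth-2 maxima over leaf pairs are (90, 33, 65, 34423), the depth-1 minima
# are (33, 65), and the maximizing root picks 65.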
| 696 |
import os
def largest_product( grid ):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
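# Project Euler problem 11: the answer is the greatest product of four numbers
# adjacent in any one direction (up, down, left, right, or diagonally) in the
# 20x20 grid read from grid.txt.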
| 696 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_model_names = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"""funnel-transformer/{name}""": 5_1_2 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id : int = 2
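    # Funnel reserves a dedicated segment id (cls_token_type_id = 2) for the [CLS]
    # token; see `create_token_type_ids_from_sequences` below.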
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            # Rebuild the backend normalizer so it matches the requested options.
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    '''simple docstring'''
    def create_and_test_config_common_properties( self ) -> None:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''tf_padding''' ) )
        self.parent.assertTrue(hasattr(config , '''depth_multiplier''' ) )
class MobileNetVaModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , min_depth=8 , tf_padding=True , last_hidden_size=1_024 , output_stride=32 , hidden_act="relu6" , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
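    # Note: MobileNetV1's channel width scales with `depth_multiplier`, which is why
    # the tester stores int(last_hidden_size * depth_multiplier) in `__init__` above.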
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ) -> None:
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ) -> None:
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> None:
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
    def test_config( self ) -> None:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
    def test_inputs_embeds( self ) -> None:
        pass
    @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
    def test_model_common_attributes( self ) -> None:
        pass
    @unittest.skip(reason='''MobileNetV1 does not output attentions''' )
    def test_attention_outputs( self ) -> None:
        pass
    def test_forward_signature( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ) -> None:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> None:
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self ) -> None:
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 696 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : int ={
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class RobertaPreLayerNormConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''roberta-prelayernorm'''
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 696 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only: bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        # Show the progress bar only on the (local) main process; disable it on all other ranks.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
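# Minimal usage sketch (hypothetical iterable; note that, per the signature above,
# the first positional argument is `main_process_only`):
#   from accelerate.utils import tqdm
#   for batch in tqdm(True, dataloader):
#       ...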
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''canine'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=16_384 , type_vocab_size=16 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , bos_token_id=0xe000 , eos_token_id=0xe001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=16_384 , local_transformer_stride=128 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
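        # CANINE works directly on Unicode code points: characters are embedded with
        # `num_hash_functions` hash embeddings over `num_hash_buckets` buckets, the
        # sequence is downsampled by `downsampling_rate` before the deep transformer
        # stack, then upsampled back with a convolution of width `upsampling_kernel_size`.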
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_llama'] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=267_735 , cutoffs=[20_000, 40_000, 200_000] , d_model=1_024 , d_embed=1_024 , n_head=16 , d_head=64 , d_inner=4_096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1_600 , clamp_len=1_000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        # `cutoffs` define the vocabulary buckets of the adaptive softmax; with
        # `proj_share_all_but_first`, every cluster except the first shares its projection.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ) -> int:
        # Message copied from Transformer-XL documentation
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value :int ) -> None:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 696 | 1 |
import colorsys
from PIL import Image # type: ignore
def get_distance( x: float , y: float , max_step: int ) -> float:
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
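# The returned value is a normalized escape time in [0, 1]: 1.0 means the point
# never diverged within max_step iterations and is treated as belonging to the set.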
def get_black_and_white_rgb( distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width: int = 800 , image_height: int = 600 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 50 , use_distance_color_coding: bool = True , ):
    img = Image.new('''RGB''' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''megatron-bert'''
    def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 696 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['gpt2']
TINY_MODEL_CHECKPOINT = 'gpt2'
if is_tf_available():
    class ModelToSave( tf.Module ):
        '''simple docstring'''
        def __init__( self , tokenizer ):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPTaLMHeadModel.from_config(config )
        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
        def serving( self , text ):
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['''input_ids'''].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )  # int32 assumed; the dtype was digit-masked in the source
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['''logits''']
            return outputs
@require_tf
@require_keras_nlp
class TFGPTaTokenizerTest( unittest.TestCase ):
    '''simple docstring'''
    def setUp( self ) -> None:
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_tokenizer( self ) -> None:
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='''tf''' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )  # int64 assumed; dtype was digit-masked in the source
@slow
    def test_graph_mode( self ) -> None:
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model( self ) -> None:
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / '''saved.model'''
                tf.saved_model.save(model , save_path , signatures={'''serving_default''': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['''serving_default'''](test_inputs )['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
    def test_from_config( self ) -> None:
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
    def test_padding( self ) -> None:
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123_123
            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 696 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
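# Templates mirrored from check_dummies: a None constant, a dummy class stub, and a
# dummy function stub; the stubs call requires_backends with the missing backend.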
class CheckDummiesTester( unittest.TestCase ):
'''simple docstring'''
    def test_find_backend( self ) -> None:
        no_backend = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
        self.assertIsNone(no_backend )
        simple_backend = find_backend(''' if not is_tokenizers_available():''' )
        self.assertEqual(simple_backend , '''tokenizers''' )
        backend_with_underscore = find_backend(''' if not is_tensorflow_text_available():''' )
        self.assertEqual(backend_with_underscore , '''tensorflow_text''' )
        double_backend = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
        self.assertEqual(double_backend , '''sentencepiece_and_tokenizers''' )
        double_backend_with_underscore = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
        self.assertEqual(double_backend_with_underscore , '''sentencepiece_and_tensorflow_text''' )
        triple_backend = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
        self.assertEqual(triple_backend , '''sentencepiece_and_tokenizers_and_vision''' )
    def test_read_init( self ) -> None:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''tensorflow_text''' , objects )
        self.assertIn('''sentencepiece_and_tokenizers''' , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''' , objects['''torch'''] )
        self.assertIn('''TFBertModel''' , objects['''tf'''] )
        self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
        self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
        self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
    def test_create_dummy_object( self ) -> None:
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )
        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files( self ) -> None:
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
| 696 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput( ModelOutput ):
    '''simple docstring'''
    projection_state : Optional[torch.FloatTensor] = None
    last_hidden_state : torch.FloatTensor = None
    hidden_states : Optional[Tuple[torch.FloatTensor]] = None
    attentions : Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
    '''simple docstring'''
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):
    '''simple docstring'''
    _keys_to_ignore_on_load_unexpected = [r'''pooler''', r'''logit_scale''']
    _keys_to_ignore_on_load_missing = [r'''position_ids''', r'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig
    def __init__( self , config ) -> None:
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , '''has_pre_transformation''' , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
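    # When `has_pre_transformation` is set on the config, the projection is applied to
    # the second-to-last hidden state after an extra LayerNorm instead of the final
    # hidden state (see `forward` below).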
    def forward( self , input_ids: Optional[torch.Tensor] = None , attention_mask: Optional[torch.Tensor] = None , token_type_ids: Optional[torch.Tensor] = None , position_ids: Optional[torch.Tensor] = None , head_mask: Optional[torch.Tensor] = None , inputs_embeds: Optional[torch.Tensor] = None , encoder_hidden_states: Optional[torch.Tensor] = None , encoder_attention_mask: Optional[torch.Tensor] = None , output_attentions: Optional[bool] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs['''hidden_states'''][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma( num: float ) -> float:
    if num <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand( x: float , z: float ) -> float:
    return math.pow(x , z - 1 ) * math.exp(-x )
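# Sanity check (mathematical fact): gamma(5) should be close to 4! = 24, since the
# integral above equals Gamma(z) and Gamma(n) = (n-1)! for positive integers n.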
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='''size''' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 696 |
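The image-processor fragment above chains the standard resize, center-crop, rescale, and normalize steps before emitting channel-first pixel values. A minimal self-contained sketch of that pipeline in plain NumPy and Pillow follows; the function name, default sizes, and mean/std values are illustrative assumptions, not the processor's actual defaults.

import numpy as np
from PIL import Image

def preprocess_image(image, size=256, crop=224, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
    # 1) convert to RGB and resize
    image = image.convert("RGB").resize((size, size), Image.BICUBIC)
    arr = np.asarray(image, dtype=np.float32)
    # 2) center crop to (crop, crop)
    top = (size - crop) // 2
    arr = arr[top : top + crop, top : top + crop]
    # 3) rescale pixel values from [0, 255] to [0, 1]
    arr = arr / 255.0
    # 4) normalize each channel with the given mean and std
    arr = (arr - np.array(mean, dtype=np.float32)) / np.array(std, dtype=np.float32)
    # 5) HWC -> CHW, i.e. ChannelDimension.FIRST
    return arr.transpose(2, 0, 1)

print(preprocess_image(Image.new("RGB", (300, 200))).shape)  # (3, 224, 224)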
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
__SCREAMING_SNAKE_CASE : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__SCREAMING_SNAKE_CASE : Union[str, Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
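The row above implements the standard equated-monthly-installment formula EMI = P * r * (1 + r)^n / ((1 + r)^n - 1), where r is the monthly interest rate and n the number of monthly payments. A worked sketch with readable names; the function name and sample figures are illustrative.

def monthly_installment(principal, rate_per_annum, years_to_repay):
    # convert the annual rate to a monthly rate and the term to a number of monthly payments
    rate_per_month = rate_per_annum / 12
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )

# 25,000 borrowed at 12% per annum over 3 years -> roughly 830.36 per month
print(round(monthly_installment(25_000, 0.12, 3), 2))  # 830.36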
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : Dict = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ : str = {
"""gpt-neox-20b""": 20_48,
}
class lowerCamelCase_ ( lowerCamelCase ):
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ['''input_ids''', '''attention_mask''']
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , **__lowerCAmelCase , )
__magic_name__ :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space:
__magic_name__ :List[str] = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) )
__magic_name__ :List[str] = add_prefix_space
__magic_name__ :List[str] = pre_tok_class(**__lowerCAmelCase )
__magic_name__ :Any = add_prefix_space
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) + [self.eos_token_id] )
if len(__lowerCAmelCase ) > self.model_max_length:
__magic_name__ :Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
| 0 |
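A short usage sketch for the fast GPT-NeoX tokenizer defined above, using the standard transformers API; it assumes network access to download the tokenizer files from the Hub, and the printed ids depend on the vocabulary.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tok("Hello world").input_ids
print(ids)              # vocabulary-dependent token ids
print(tok.decode(ids))  # "Hello world" -- decoding round-trips the input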
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 0 |
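The row above computes the Möbius function mu(n): 0 when n has a squared prime factor, otherwise (-1)^k with k the number of distinct prime factors. A self-contained sketch that inlines the factorisation instead of relying on the external maths.* helpers; the names are illustrative.

def mobius(n):
    """Return mu(n) for n >= 1 via trial-division factorisation."""
    if n < 1:
        raise ValueError("n must be a positive integer")
    factors = 0
    d = 2
    while d * d <= n:
        if n % d == 0:
            n //= d
            if n % d == 0:  # squared prime factor -> mu(n) = 0
                return 0
            factors += 1
        else:
            d += 1
    if n > 1:  # one remaining prime factor
        factors += 1
    return -1 if factors % 2 else 1

assert mobius(1) == 1 and mobius(6) == 1 and mobius(30) == -1 and mobius(12) == 0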
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __lowerCamelCase (_a ):
_lowercase = """fnet"""
def __init__( self: Optional[Any],A_: str=3_2000,A_: Optional[Any]=768,A_: str=12,A_: List[str]=3072,A_: Union[str, Any]="gelu_new",A_: Optional[int]=0.1,A_: List[str]=512,A_: Optional[Any]=4,A_: Optional[int]=0.0_2,A_: Optional[Any]=1E-12,A_: int=False,A_: Any=512,A_: Optional[Any]=3,A_: List[Any]=1,A_: Tuple=2,**A_: Any,):
'''simple docstring'''
super().__init__(pad_token_id=A_,bos_token_id=A_,eos_token_id=A_,**A_ )
__UpperCamelCase = vocab_size
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = initializer_range
__UpperCamelCase = type_vocab_size
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = use_tpu_fourier_optimizations
__UpperCamelCase = tpu_short_seq_length
| 1 |
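Like any PretrainedConfig subclass, the FNet configuration above is typically built directly (or loaded from the Hub) and passed to a model class. A short sketch using the public transformers API; the overridden sizes are arbitrary examples.

from transformers import FNetConfig, FNetModel

config = FNetConfig(hidden_size=256, num_hidden_layers=6)  # randomly initialised, smaller than default
model = FNetModel(config)
# configs round-trip through plain dicts
assert FNetConfig.from_dict(config.to_dict()).hidden_size == 256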
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Tuple=99 , __lowerCAmelCase : Any=24 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : str=6 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : int=5_12 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=10_00 , ) -> Tuple:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = scope
_A = range_bbox
def snake_case_ ( self : int ) -> str:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_A = bbox[i, j, 3]
_A = bbox[i, j, 1]
_A = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_A = bbox[i, j, 2]
_A = bbox[i, j, 0]
_A = t
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case_ ( self : str ) -> Union[str, Any]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def snake_case_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , ) -> Dict:
_A = LiltModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = model(__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_A = model(__lowerCAmelCase , bbox=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_A = model(__lowerCAmelCase , bbox=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , ) -> Tuple:
_A = self.num_labels
_A = LiltForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = model(
__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , ) -> int:
_A = LiltForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_A = model(
__lowerCAmelCase , bbox=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self : Dict ) -> Dict:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( _A , _A , _A , unittest.TestCase):
"""simple docstring"""
a__ : Tuple = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ : Optional[Any] = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Optional[int] = False
a__ : Any = False
def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Optional[int]:
return True
def snake_case_ ( self : Tuple ) -> Optional[int]:
_A = LiltModelTester(self )
_A = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def snake_case_ ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def snake_case_ ( self : Optional[Any] ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def snake_case_ ( self : Any ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def snake_case_ ( self : Optional[int] ) -> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
def snake_case_ ( self : int ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
@slow
def snake_case_ ( self : Optional[int] ) -> List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = LiltModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
@slow
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : str ) -> Tuple:
_A = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__lowerCAmelCase )
_A = torch.tensor([[1, 2]] , device=__lowerCAmelCase )
_A = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__lowerCAmelCase )
# forward pass
with torch.no_grad():
_A = model(input_ids=__lowerCAmelCase , bbox=__lowerCAmelCase )
_A = torch.Size([1, 2, 7_68] )
_A = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=__lowerCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , __lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __lowerCAmelCase , atol=1E-3 ) )
| 2 |
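The model tester above repairs illegal bounding boxes with a nested Python loop, swapping coordinates so that x0 <= x1 and y0 <= y1. The same fix can be expressed in one vectorised step; a minimal sketch follows (illustrative, not part of the original test).

import torch

def make_bbox_legal(bbox):
    """bbox: (..., 4) tensor of (x0, y0, x1, y1); returns a copy with each corner pair sorted."""
    xs, _ = torch.sort(bbox[..., [0, 2]], dim=-1)  # ensures x0 <= x1
    ys, _ = torch.sort(bbox[..., [1, 3]], dim=-1)  # ensures y0 <= y1
    return torch.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], dim=-1)

print(make_bbox_legal(torch.tensor([[5, 8, 2, 3]])))  # tensor([[2, 3, 5, 8]])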
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 0 |
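The __init__ file above uses the _LazyModule pattern: the import_structure dict lists what each submodule exports, the TYPE_CHECKING branch gives static type checkers real imports, and at runtime nothing heavy is imported until first attribute access. A stripped-down sketch of the idea; this is an illustrative reimplementation, not transformers' actual _LazyModule.

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Defer importing submodules until one of their attributes is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once per attribute
        return value

lazy = LazyModule("demo", {"json": ["loads"], "math": ["sqrt"]})
print(lazy.sqrt(9.0))  # math is imported only now -> 3.0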
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
return F'''gaussian_noise_s={seed}_shape={"_".join([str(A_ ) for s in shape] )}.npy'''
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
def UpperCAmelCase_ ( self , A_=0 , A_=(4, 4, 64, 64) , A_=False )-> List[str]:
'''simple docstring'''
UpperCamelCase = jnp.bfloataa if fpaa else jnp.floataa
UpperCamelCase = jnp.array(load_hf_numpy(self.get_file_format(A_ , A_ ) ) , dtype=A_ )
return image
def UpperCAmelCase_ ( self , A_=False , A_="CompVis/stable-diffusion-v1-4" )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = jnp.bfloataa if fpaa else jnp.floataa
UpperCamelCase = 'bf16' if fpaa else None
UpperCamelCase , UpperCamelCase = FlaxUNetaDConditionModel.from_pretrained(
A_ , subfolder='unet' , dtype=A_ , revision=A_ )
return model, params
def UpperCAmelCase_ ( self , A_=0 , A_=(4, 77, 768) , A_=False )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = jnp.bfloataa if fpaa else jnp.floataa
UpperCamelCase = jnp.array(load_hf_numpy(self.get_file_format(A_ , A_ ) ) , dtype=A_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=A_ )
UpperCamelCase = self.get_latents(A_ , fpaa=A_ )
UpperCamelCase = self.get_encoder_hidden_states(A_ , fpaa=A_ )
UpperCamelCase = model.apply(
{'params': params} , A_ , jnp.array(A_ , dtype=jnp.intaa ) , encoder_hidden_states=A_ , ).sample
assert sample.shape == latents.shape
UpperCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
UpperCamelCase = jnp.array(A_ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(A_ , A_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def UpperCAmelCase_ ( self , A_ , A_ , A_ )-> Any:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=A_ )
UpperCamelCase = self.get_latents(A_ , shape=(4, 4, 96, 96) , fpaa=A_ )
UpperCamelCase = self.get_encoder_hidden_states(A_ , shape=(4, 77, 1024) , fpaa=A_ )
UpperCamelCase = model.apply(
{'params': params} , A_ , jnp.array(A_ , dtype=jnp.intaa ) , encoder_hidden_states=A_ , ).sample
assert sample.shape == latents.shape
UpperCamelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
UpperCamelCase = jnp.array(A_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(A_ , A_ , atol=1e-2 )
| 3 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
        # This tells us how often (every how many layers) a sparse layer is placed in the encoder.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us how often (every how many layers) a sparse layer is placed in the decoder.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 696 | 0 |
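The encoder_sparse_step / decoder_sparse_step values derived above control how often a sparse mixture-of-experts layer replaces a dense one. A tiny illustrative helper showing the resulting layout under one common convention (every sparse_step-th layer is sparse); the real modeling code may index slightly differently.

def sparse_layer_indices(num_layers, num_sparse_layers):
    # mirrors the config logic: zero sparse layers falls back to the "0 sparse layers" hack
    if num_sparse_layers == 0:
        return []
    step = num_layers // num_sparse_layers
    return [i for i in range(num_layers) if (i + 1) % step == 0]

print(sparse_layer_indices(12, 3))  # [3, 7, 11] -> every 4th layer is sparse
print(sparse_layer_indices(12, 0))  # [] -> fully dense stack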
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase ,lowerCAmelCase = analyze_text(_UpperCAmelCase )
lowerCAmelCase = list(' ' + ascii_lowercase )
    # total number of single characters, used to turn counts into probabilities.
lowerCAmelCase = sum(single_char_strings.values() )
# one length string
lowerCAmelCase = 0
    # for each character in the alphabet, accumulate its entropy contribution if it occurs
for ch in my_alphas:
if ch in single_char_strings:
lowerCAmelCase = single_char_strings[ch]
lowerCAmelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_UpperCAmelCase ) # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum ):.1f}' )
# two len string
lowerCAmelCase = sum(two_char_strings.values() )
lowerCAmelCase = 0
    # for each two-character sequence, accumulate its entropy contribution.
for cha in my_alphas:
for cha in my_alphas:
lowerCAmelCase = cha + cha
if sequence in two_char_strings:
lowerCAmelCase = two_char_strings[sequence]
lowerCAmelCase = int(_UpperCAmelCase ) / all_sum
my_sec_sum += prob * math.loga(_UpperCAmelCase )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = Counter() # type: ignore
lowerCAmelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # count the implicit leading space paired with the first character.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_UpperCAmelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _SCREAMING_SNAKE_CASE ():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 4 |
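The script above prints the first-order entropy H1 = -sum(p(c) * log2 p(c)), the second-order entropy over character pairs, and their difference. A minimal worked example with clean names; the sample string is arbitrary.

import math
from collections import Counter

def first_order_entropy(text):
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

# "aab": p(a) = 2/3, p(b) = 1/3 -> about 0.918 bits per character
print(round(first_order_entropy("aab"), 3))  # 0.918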
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_SCREAMING_SNAKE_CASE )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowercase : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
_lowercase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
_lowercase : str = "text"
_lowercase : str = "labels"
def _lowercase ( self , _lowercase ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , _lowercase ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
_lowerCAmelCase = copy.deepcopy(self )
_lowerCAmelCase = self.label_schema.copy()
_lowerCAmelCase = features[self.label_column]
_lowerCAmelCase = label_schema
return task_template
@property
def _lowercase ( self ):
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
| 5 |
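A short sketch of how the task template above is exercised in older datasets releases (task templates were later deprecated): aligning with features copies the dataset's actual ClassLabel into label_schema. The feature and label names are examples.

from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
template = TextClassification(text_column="text", label_column="labels")
aligned = template.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']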
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["torch", "scipy"]
def __init__( self :str , *__A :Union[str, Any] , **__A :str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def _snake_case ( cls :List[str] , *__A :Union[str, Any] , **__A :str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def _snake_case ( cls :Any , *__A :Union[str, Any] , **__A :Optional[int] ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch""", """scipy"""] ) | 6 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''', safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1E-3

    def __magic_name__( self :Optional[int] ) -> Optional[int]:
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''', safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1E-3

    def __magic_name__( self :Dict ) -> Tuple:
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''', safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def __magic_name__( self :List[str] ) -> Union[str, Any]:
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''', safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def __magic_name__( self :int ) -> Tuple:
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs['''image'''] = inputs['''image'''].resize((504, 504))

        model_id = '''timbrooks/instruct-pix2pix'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
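# Why 504 matters (an illustrative aside, not part of the test suite): the
# Stable Diffusion VAE downsamples by a factor of 8, so a 504x504 input maps to
# a 63x63 latent grid, and 504 is divisible by 8 but not by 16 or 32 -- exactly
# the edge case the resolution test above exercises.
#
#     assert 504 % 8 == 0 and 504 % 16 != 0 and 504 % 32 != 0
#     assert 504 // 8 == 63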
| 696 | 0 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens')
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                ' eos tokens being added.')
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str):
        tokens = [chr(i) for i in text.encode('utf-8')]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('utf-8')
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode('utf-8')
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('utf-8')
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('utf-8')
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode('utf-8', errors='ignore')
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
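# Minimal round-trip sketch (illustrative; commented out to keep the module
# import-safe): ids 0, 1 and 2 are reserved for <pad>, </s> and <unk>, so a
# UTF-8 byte b maps to id b + 3 via _convert_token_to_id(chr(b)).
#
#     tokenizer = ByT5Tokenizer()
#     tokenizer("hi").input_ids        # [104 + 3, 105 + 3, 1] == [107, 108, 1]
#     tokenizer.decode([107, 108, 1])  # 'hi</s>'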
| 7 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    '''
    Output class for the scheduler's `step` function: the sample at the next
    timestep plus (optionally) the predicted denoised sample.
    '''

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
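# Worked numbers (sketch): with the default cosine transform and
# num_diffusion_timesteps=1000, the first ratio alpha_bar_fn(0.001) / alpha_bar_fn(0)
# is roughly 1 - 4e-5, so betas start around 4e-5 and grow monotonically until
# the max_beta=0.999 cap takes over near t=1.
#
#     betas = betas_for_alpha_bar(1_000)
#     assert betas.shape == (1_000,)
#     assert float(betas[0]) < float(betas[-1]) <= 0.999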
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get('''set_alpha_to_one''', None) is not None:
            deprecation_message = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''', '''1.0.0''', deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''')

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''')

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction`''')

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
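# Minimal usage sketch (illustrative; `unet` and `latents` are hypothetical
# placeholders, not defined in this file): DDIM inversion walks the diffusion
# process *forward* in time, so each step() maps x_t to x_{t+k} using formula
# (12) of https://arxiv.org/pdf/2010.02502.pdf with the stochastic term dropped.
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1_000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample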
| 696 | 0 |
'''Floyd-Warshall all-pairs shortest paths on a small adjacency-matrix graph.'''
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11, via 1 -> 3 -> 4
    print(graph.show_min(0, 3))  # 16, via 0 -> 2 -> 3
 | 696 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default='''summarization''', metadata={'''include_in_asdict_even_if_is_default''': True})
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''')})
    label_schema: ClassVar[Features] = Features({'''summary''': Value('''string''')})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
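    # Usage sketch (illustrative): `column_mapping` tells task preparation how to
    # rename arbitrary source columns to the canonical "text"/"summary" pair.
    #
    #     task = Summarization(text_column="article", summary_column="highlights")
    #     task.column_mapping  # {'article': 'text', 'highlights': 'summary'}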
| 696 | 0 |
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print('total reduced size: ', total_reduced_size / 1_024 / 1_024 / 1_024, 'GB')

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
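# Usage sketch (illustrative; "model.onnx" is a hypothetical path): this writes
# an "optimized_" copy next to the input file, with duplicate weight tensors
# folded into a single initializer and every node input rewired to the survivor.
#
#     optimized_path = remove_dup_initializers("model.onnx")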
| 9 |
def solution(max_perimeter: int = 10**9) -> int:
    """
    Project Euler 94 (almost equilateral triangles): sum the perimeters of all
    isosceles triangles (a, a, a +/- 1) with integral sides and integral area
    whose perimeter does not exceed ``max_perimeter``. The recurrence below
    walks the solution family directly; the first perimeters it yields are
    16, 50, 196, 722, ...
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
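# Independent cross-check (a brute-force sketch using Heron's formula, not part
# of the original solution): for an isosceles triangle (a, a, c),
# 16 * area^2 = c^2 * (4a^2 - c^2), so the area is integral exactly when that
# quantity is a perfect square whose root is divisible by 4.
import math


def _has_integral_area(a: int, c: int) -> bool:
    sq = c * c * (4 * a * a - c * c)
    if sq <= 0:  # degenerate or impossible triangle
        return False
    root = math.isqrt(sq)
    return root * root == sq and root % 4 == 0


def _brute_force(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            perimeter = 2 * a + c
            if perimeter <= max_perimeter and _has_integral_area(a, c):
                total += perimeter
    return total


# e.g. _brute_force(1_000) == solution(1_000) == 16 + 50 + 196 + 722 == 984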
| 696 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[str] , _A : Any , _A : List[Any]=7 , _A : int=3 , _A : Tuple=30 , _A : Union[str, Any]=400 , _A : List[str]=True , _A : Union[str, Any]=None , _A : int=True , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : Optional[Any]=[0.5, 0.5, 0.5] , _A : List[str]=True , _A : Union[str, Any]=1 / 255 , _A : Union[str, Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_pad
def UpperCamelCase_ ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self : int , _A : Tuple , _A : Union[str, Any]=False ):
if not batched:
_UpperCamelCase = image_inputs[0]
if isinstance(_A , Image.Image ):
_UpperCamelCase , _UpperCamelCase = image.size
else:
_UpperCamelCase , _UpperCamelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCamelCase = int(self.size['''shortest_edge'''] * h / w )
_UpperCamelCase = self.size['''shortest_edge''']
elif w > h:
_UpperCamelCase = self.size['''shortest_edge''']
_UpperCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_UpperCamelCase = self.size['''shortest_edge''']
_UpperCamelCase = self.size['''shortest_edge''']
else:
_UpperCamelCase = []
for image in image_inputs:
_UpperCamelCase , _UpperCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCamelCase = max(_A , key=lambda _A : item[0] )[0]
_UpperCamelCase = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DeformableDetrImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DeformableDetrImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , '''image_mean''' ) )
self.assertTrue(hasattr(_A , '''image_std''' ) )
self.assertTrue(hasattr(_A , '''do_normalize''' ) )
self.assertTrue(hasattr(_A , '''do_resize''' ) )
self.assertTrue(hasattr(_A , '''do_rescale''' ) )
self.assertTrue(hasattr(_A , '''do_pad''' ) )
self.assertTrue(hasattr(_A , '''size''' ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , _A )
_UpperCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : int ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A , batched=_A )
_UpperCamelCase = image_processing(_A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(_A , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Dict ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCamelCase = image_processing(_A , return_tensors='''pt''' ).pixel_values
_UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
# prepare image and target
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
_UpperCamelCase = DeformableDetrImageProcessor()
_UpperCamelCase = image_processing(images=_A , annotations=_A , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
_UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
_UpperCamelCase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
_UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
_UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
@slow
def UpperCamelCase_ ( self : List[str] ):
# prepare image, target and masks_path
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_UpperCamelCase = json.loads(f.read() )
_UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
_UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_UpperCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_UpperCamelCase = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' )
# verify pixel values
_UpperCamelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _A )
_UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
_UpperCamelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) )
# verify boxes
_UpperCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A )
_UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) )
# verify image_id
_UpperCamelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) )
# verify is_crowd
_UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) )
# verify class_labels
_UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) )
# verify masks
_UpperCamelCase = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A )
# verify orig_size
_UpperCamelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) )
# verify size
_UpperCamelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
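# The resizing rule the tester above mirrors, as a standalone sketch: the short
# side is scaled to `shortest_edge` and the long side keeps the aspect ratio.
def _shortest_edge_resize(h: int, w: int, shortest_edge: int) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


# e.g. the 480x640 COCO image with the checkpoint default shortest_edge=800
# becomes 800x1066, matching the (1, 3, 800, 1066) shape asserted above.
assert _shortest_edge_resize(480, 640, 800) == (800, 1066)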
| 10 |
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `a` can be turned into string `b` by capitalizing
    some of its lowercase letters and deleting all remaining lowercase letters
    (the classic "abbreviation" dynamic-programming problem).

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 696 | 0 |
'''Deprecated feature-extractor alias for the CLIP image processor.'''
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 11 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float'''),
                    '''references''': datasets.Value('''float'''),
                }),
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 696 | 0 |
def least_divisible_repunit(divisor: int) -> int:
    """
    Return the least k such that the repunit R(k) = 111...1 (k ones) is
    divisible by `divisor`, or 0 when no repunit can be (i.e. when the divisor
    shares a factor with 10).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 129: find the least odd divisor whose least divisible repunit
    is longer than `limit`.
    """
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f'''{solution() = }''')
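# Quick sanity check (sketch): R(6) = 111111 = 3 x 7 x 11 x 13 x 37, so the
# least repunit divisible by 7 has six ones:
#
#     assert least_divisible_repunit(7) == 6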
| 12 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
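# Segment-id sketch (illustrative; commented out to avoid a network download):
# for a sequence pair the mask covers [CLS] A [SEP] with zeros and B [SEP] with
# ones, so len(A)=3 and len(B)=2 give five zeros followed by three ones.
#
#     tok = MobileBertTokenizerFast.from_pretrained('''google/mobilebert-uncased''')
#     tok.create_token_type_ids_from_sequences([1, 2, 3], [4, 5])
#     # -> [0, 0, 0, 0, 0, 1, 1, 1]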
| 696 | 0 |
'''Prim's minimum spanning tree algorithm with a hand-rolled binary min-heap.'''
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
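# Tiny fixed example (sketch, bypassing the interactive input above): the
# triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3) has MST edges (0, 1) and (1, 2).
#
#     fixed = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#         fixed[u].append([v, w])
#         fixed[v].append([u, w])
#     prisms_algorithm(fixed)  # -> [(0, 1), (1, 2)]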
| 13 |
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + '''/grid.txt''') as file:
        for line in file:
            grid.append(line.strip('''\n''').split(''' '''))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
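# Quick check on a small grid (sketch): in a 4x4 ramp the best run of four is
# the bottom row, 13 * 14 * 15 * 16 == 43680.
#
#     largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#     # -> 43680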
| 696 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='''accelerate command helpers''')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
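# Typical invocations (shell sketch; `train.py` stands for any user script) of
# the subcommands registered above:
#
#     accelerate config           # interactive configuration wizard
#     accelerate env              # print the current environment / config
#     accelerate launch train.py  # run a script with the configured launcher
#     accelerate test             # sanity-check the saved configuration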
| 14 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
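# A minimal standalone inference sketch (not part of the test suite). The
# checkpoint name comes from the integration test above; everything else is
# an assumption about how the classes in this file would be used directly.
#
#   image_processor = MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' )
#   model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' )
#   inputs = image_processor(images=prepare_img() , return_tensors='''pt''' )
#   logits = model(**inputs ).logits  # shape (1, 1001): ImageNet-1k classes plus background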
| 696 | 0 |
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[int] ) -> list[int]: # This function is recursive
"""simple docstring"""
    array_length = len(__magic_name__ )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return __magic_name__
    # Else
    pivot = __magic_name__[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if __magic_name__[i] < pivot:
            is_found = True
            temp_array = [element for element in __magic_name__[i:] if element >= __magic_name__[i]]
            temp_array = UpperCamelCase(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in __magic_name__[1:] if element >= pivot]
    temp_array = [pivot, *UpperCamelCase(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
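# A worked example (assumed, to illustrate the recursion above):
#   UpperCamelCase([10, 22, 9, 33, 21, 50, 41, 60, 80])
# should return [10, 22, 33, 41, 60, 80], the longest non-decreasing
# subsequence of the input list.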
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
    _UpperCamelCase()
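# A run sketch (assumed): the script only needs a GitHub token with repo scope
# in the environment, e.g.
#   GITHUB_TOKEN=<personal-access-token> python stale.py
# The actual file name is not shown in this dump, so `stale.py` is a
# hypothetical placeholder.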
| 696 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def generate_random_hand( ):
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands( number_of_hands : int = 100 ):
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize("hand, expected" , TEST_FLUSH )
def test_hand_is_flush( hand , expected ):
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , TEST_STRAIGHT )
def test_hand_is_straight( hand , expected ):
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight( hand , expected , card_values ):
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , TEST_KIND )
def test_hand_is_same_kind( hand , expected ):
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , TEST_TYPES )
def test_hand_values( hand , expected ):
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , TEST_COMPARE )
def test_compare_simple( hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def test_compare_random( hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
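# A minimal usage sketch of the PokerHand API exercised above (assumed):
#   PokerHand("2H 3H 4H 5H 6H").compare_with(PokerHand("KS AS TS QS JS"))
# is expected to return "Loss", since a six-high straight flush loses to a
# royal flush (mirroring the first row of TEST_COMPARE).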
def test_hand_sorted( ):
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight( ):
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight( ):
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project( ):
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    file_path = os.path.join(script_dir , "poker_hands.txt" )
    with open(file_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
 | 16 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
    model_type = '''canine'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=16_384 , type_vocab_size=16 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , bos_token_id=0xe000 , eos_token_id=0xe001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=16_384 , local_transformer_stride=128 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
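# A minimal instantiation sketch (assumed): upstream this class is CanineConfig,
# and the defaults above mirror google/canine-s. A smaller character-level
# config could be built as
#   config = _lowercase(hidden_size=256 , num_hidden_layers=4 )
# with every omitted argument falling back to the defaults in __init__.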
| 696 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""sentence_order_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding( self ):
        model = AlbertModel.from_pretrained("""albert-base-v2""" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 17 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=267_735 , cutoffs=[20_000, 40_000, 200_000] , d_model=1_024 , d_embed=1_024 , n_head=16 , d_head=64 , d_inner=4_096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1_600 , clamp_len=1_000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ):
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 696 | 0 |
'''simple docstring'''
def bubble_sort(list_data : list , length : int = 0 ):
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
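# A worked example (assumed): bubble_sort([0, 5, 2, 3, 2]) returns
# [0, 2, 2, 3, 5]. Each recursive call bubbles the largest remaining element
# to the end, then retries with the considered length reduced by one.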
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
    model_type = '''megatron-bert'''
    def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 696 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['audio_values', 'audio_mask']
    def __init__( self , spectrogram_length=20_48 , num_channels=1 , patch_size=[16, 16] , feature_size=1_28 , sampling_rate=4_41_00 , hop_length_to_sampling_rate=86 , n_fft=20_48 , padding_value=0.0 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ).T
    def _np_extract_fbank_features( self , waveform) -> np.ndarray:
        '''simple docstring'''
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , '''hann''') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0) + 1.0
        return log_spec
    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')
        is_batched_numpy = isinstance(raw_speech , np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray):
            raw_speech = np.asarray(raw_speech , dtype=np.float32)
        elif isinstance(raw_speech , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list):
            audio_features = [np.asarray(feature , dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors)
        return encoded_inputs
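# A minimal usage sketch (assumed; upstream this class is TvltFeatureExtractor):
#   import numpy as np
#   extractor = _UpperCAmelCase()
#   features = extractor(np.zeros(44_100 , dtype=np.float32) , sampling_rate=44_100 , return_tensors='''np''')
#   # features['''audio_values'''] holds the padded log-mel patches and
#   # features['''audio_mask'''] marks which patches are real vs. padding.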
| 19 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tokenizers''' )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' )
__SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' )
__SCREAMING_SNAKE_CASE : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' )
__SCREAMING_SNAKE_CASE : List[str] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCAmelCase__ )
self.assertIn('''tensorflow_text''' , lowerCAmelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__SCREAMING_SNAKE_CASE : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
| 696 | 0 |
def _lowercase( __a : list[int] ):
    if not __a: # Makes sure that the list is not empty
        raise ValueError('List is empty' )
    average = sum(__a ) / len(__a ) # Calculate the average
    return sum(abs(x - average ) for x in __a ) / len(__a )
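# A worked example (assumed): for [1, 2, 3, 4] the average is 2.5 and the
# absolute deviations are 1.5, 0.5, 0.5, 1.5, so the function returns
# (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.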
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase ( lowercase__ ):
    if lowercase__ <= 0:
        raise ValueError('''math domain error''' )
    return quad(integrand , 0 , inf , args=(lowercase__) )[0]
def integrand ( x , z ):
    return math.pow(x , z - 1 ) * math.exp(-x )
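# A worked check (assumed): the integral above is the Gamma function, so
# _UpperCamelCase(5) should be close to 4! = 24 and _UpperCamelCase(0.5)
# close to sqrt(pi) ~= 1.77245.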
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 | 0 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets( datasets , probabilities = None , seed = None , info = None , split = None , stopping_strategy = "first_exhausted" , ):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("""Unable to interleave an empty list of datasets.""" )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets( dsets , info = None , split = None , axis = 0 , ):
    if not dsets:
        raise ValueError("""Unable to concatenate an empty list of datasets.""" )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        """is an empty dataset dictionary.""" )
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
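# A minimal usage sketch (assumed datasets): interleaving two map-style
# datasets with sampling probabilities.
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2] , probabilities=[0.5, 0.5] , seed=42 )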
| 21 |
def _UpperCamelCase ( principal , rate_per_annum , years_to_repay ):
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''' )
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception('''Years to repay must be an integer > 0''' )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
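# A worked example (assumed values): for a principal of 25000, a 12% annual
# rate (0.12) and 3 years, the monthly rate is 0.01 over 36 payments, which
# gives an EMI of roughly 830.36.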
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class A ( _a ):
lowercase_ = 'AutoTokenizer'
lowercase_ = ['tokenizer']
lowercase_ = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
    def __init__( self , tokenizer , speaker_embeddings=None ):
        """simple docstring"""
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
def __lowerCAmelCase ( cls : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]="speaker_embeddings_path.json" , **lowerCAmelCase_ : Any ) -> str:
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_a = get_file_from_repo(
lowerCAmelCase_ , lowerCAmelCase_ , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase_ ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase_ ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase_ ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase_ ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase_ ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase_ ) , revision=kwargs.pop('''revision''' , lowerCAmelCase_ ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
_a = None
else:
with open(lowerCAmelCase_ ) as speaker_embeddings_json:
_a = json.load(lowerCAmelCase_ )
else:
_a = None
_a = AutoTokenizer.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
return cls(tokenizer=lowerCAmelCase_ , speaker_embeddings=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any]="speaker_embeddings_path.json" , lowerCAmelCase_ : Dict="speaker_embeddings" , lowerCAmelCase_ : bool = False , **lowerCAmelCase_ : List[str] , ) -> str:
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ , '''v2''' ) , exist_ok=lowerCAmelCase_ )
_a = {}
_a = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_a = self._load_voice_preset(lowerCAmelCase_ )
_a = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , lowerCAmelCase_ , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowerCAmelCase_ , )
_a = os.path.join(lowerCAmelCase_ , F'{prompt_key}_{key}.npy' )
_a = tmp_dict
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' ) as fp:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
super().save_pretrained(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : str = None , **lowerCAmelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
_a = self.speaker_embeddings[voice_preset]
_a = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
_a = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase_ ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase_ ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase_ ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase_ ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase_ ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase_ ) , revision=kwargs.pop('''revision''' , lowerCAmelCase_ ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
_a = np.load(lowerCAmelCase_ )
return voice_preset_dict
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[dict] = None ) -> Any:
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self : Union[str, Any] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict="pt" , lowerCAmelCase_ : Dict=2_56 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=False , **lowerCAmelCase_ : int , ) -> str:
"""simple docstring"""
if voice_preset is not None and not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_a = self._load_voice_preset(lowerCAmelCase_ )
else:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not voice_preset.endswith('''.npz''' ):
_a = voice_preset + '''.npz'''
_a = np.load(lowerCAmelCase_ )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
_a = self.tokenizer(
lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding='''max_length''' , max_length=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
if voice_preset is not None:
_a = voice_preset
return encoded_text
| 22 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
    factors = prime_factors(lowercase__ )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
return 0
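# Worked examples (assumed): 6 = 2 * 3 is square-free with two prime factors,
# so the function returns 1; 30 = 2 * 3 * 5 has three, so it returns -1;
# 12 = 2**2 * 3 is not square-free, so it returns 0.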
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 0 |
import numpy
class TwoHiddenLayerNeuralNetwork :
"""simple docstring"""
    def __init__( self , input_array , output_array ) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self ) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self ) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train( self , output , iterations , give_loss ) -> None:
        for iteration in range(1 , iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(f"""Iteration {iteration} Loss: {loss}""" )
    def predict( self , input_arr ) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid (value):
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative (value):
    return (value) * (1 - (value))
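# Worked values (assumed): sigmoid(0.0) evaluates to 0.5, and since the
# derivative above is expressed in terms of the activation a as a * (1 - a),
# sigmoid_derivative(0.5) evaluates to 0.25.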
def example ():
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64))
if __name__ == "__main__":
example()
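# Illustrative note: the truth table in example() is the 3-input parity (XOR)
# function, a target that is not linearly separable, hence the two hidden layers.
# A standalone check of that claim about the table (hypothetical helper):
def _is_parity_table() -> bool:
    inputs = [(a, b, c) for a in (0, 1) for b in (0, 1) for c in (0, 1)]
    expected = [0, 1, 1, 0, 1, 0, 0, 1]
    return all((a ^ b ^ c) == e for (a, b, c), e in zip(inputs, expected))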
| 23 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
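# Every value above is deliberately different from the PretrainedConfig default
# for its key; test_config_common_kwargs_is_complete below asserts exactly that,
# so any new PretrainedConfig kwarg must also be added to this mapping.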
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_update_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs`; pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_config_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check makes sure we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 696 | 0 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        # Echo the sample indices back as "predictions" so the order can be checked.
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
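        # Re-running evaluate()/predict() with eval_accumulation_steps = 2 exercises
        # the code path that moves accumulated predictions off the device every two
        # steps; the expected metrics are identical to the first pass.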
| 24 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
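# A minimal sketch of the lazy-import idea (assumption: a simplified stand-in,
# not the real transformers._LazyModule): the module replaces itself in
# sys.modules with an object whose __getattr__ imports the defining submodule
# only on first attribute access.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        submodule = self._symbol_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)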
| 696 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 25 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=32_128, d_model=768, d_kv=64, d_ff=2_048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # How many encoder layers apart each sparse (MoE) layer is placed.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # How many decoder layers apart each sparse (MoE) layer is placed.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
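# Illustrative usage (a sketch, not part of the original module): with the
# defaults above, a sparse MoE layer is placed every
# num_layers // num_sparse_encoder_layers = 12 // 3 = 4 layers:
#
#   config = SwitchTransformersConfig()
#   assert config.encoder_sparse_step == 4
#   assert config.decoder_sparse_step == 4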
| 696 | 0 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, left, right, key):
    # Binary search for the smallest index in v[left..right] whose value is >= key.
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element of a shorter subsequence.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
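# Worked example: for v = [2, 5, 3, 7, 11, 8, 10, 13, 6] the longest strictly
# increasing subsequence is [2, 3, 7, 8, 10, 13], so:
#
#   assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6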
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: the denoised video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCamelCase:
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=16 , snake_case_=13 , snake_case_=7 , snake_case_=14 , snake_case_=10 , snake_case_=19 , snake_case_=5 , snake_case_=4 , snake_case_=True , snake_case_=16 , snake_case_=2 , snake_case_=4 , snake_case_=4 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=[1, 2, 3, 4, 5] , snake_case_=25 , snake_case_=5 , ):
_A = d_model
_A = parent
_A = batch_size
_A = prediction_length
_A = context_length
_A = cardinality
_A = num_time_features
_A = lags_sequence
_A = embedding_dimension
_A = is_training
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = context_length
_A = prediction_length + label_length
_A = label_length
_A = moving_average
_A = autocorrelation_factor
def lowerCAmelCase__ ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase__ ( self , snake_case_ ):
_A = config.context_length + max(config.lags_sequence )
_A = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_A = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_A = floats_tensor([self.batch_size, _past_length] )
_A = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_A = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_A = floats_tensor([self.batch_size, config.prediction_length] )
_A = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def lowerCAmelCase__ ( self ):
_A = self.get_config()
_A = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def lowerCAmelCase__ ( self ):
_A, _A = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ ):
_A = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
_A = model(**snake_case_ )
_A = outputs.encoder_last_hidden_state
_A = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_A = model.get_encoder()
encoder.save_pretrained(snake_case_ )
_A = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
_A, _A, _A, _A, _A = model.create_network_inputs(**snake_case_ )
_A, _A = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_A = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_A = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_A = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_A = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_A = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_A = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_A = model.get_decoder()
decoder.save_pretrained(snake_case_ )
_A = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
_A = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
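        # Autoformer feeds the decoder a decomposition of the context window:
        # decomposition_layer() splits it into a seasonal part and a trend part
        # (a moving average), and the code above rebuilds the decoder inputs from
        # those two streams before comparing against the full model's outputs.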
@require_torch
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__magic_name__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__magic_name__ = (AutoformerForPrediction,) if is_torch_available() else ()
__magic_name__ = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def lowerCAmelCase__ ( self ):
_A = AutoformerModelTester(self )
_A = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_A = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
_A, _A = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info['missing_keys'] , [] )
def lowerCAmelCase__ ( self ):
_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason='Model has no tokens embeddings' )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
_A = inspect.signature(getattr(snake_case_ , 'forward' ) )
# The main input is the name of the argument after `self`
_A = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(snake_case_ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def lowerCAmelCase__ ( self ):
_A, _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
_A = getattr(self.model_tester , 'seq_length' , snake_case_ )
_A = getattr(self.model_tester , 'decoder_seq_length' , snake_case_ )
_A = getattr(self.model_tester , 'encoder_seq_length' , snake_case_ )
_A = getattr(self.model_tester , 'd_model' , snake_case_ )
_A = getattr(self.model_tester , 'num_attention_heads' , snake_case_ )
_A = d_model // num_attention_heads
for model_class in self.all_model_classes:
_A = True
_A = False
_A = True
_A = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_A = True
_A = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_A = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_A = len(snake_case_ )
_A = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
_A = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_A = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_A = True
_A = True
_A = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
_A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase__ ( self ):
super().test_retain_grad_hidden_states_attentions()
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE="train-batch.pt" ) -> Any:
"""simple docstring"""
_A = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=_SCREAMING_SNAKE_CASE , repo_type='dataset' )
_A = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
return batch
@require_torch
@slow
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self ):
_A = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case_ )
_A = prepare_batch()
with torch.no_grad():
_A = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
_A = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
_A = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self ):
_A = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case_ )
_A = prepare_batch('val-batch.pt' )
with torch.no_grad():
_A = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
_A = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
_A = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self ):
_A = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(snake_case_ )
_A = prepare_batch('val-batch.pt' )
with torch.no_grad():
_A = model.generate(
static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
_A = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
_A = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=snake_case_ )
_A = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1E-1 ) )
| 27 |
from datetime import datetime
import requests
def download_video(url):
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 696 | 0 |
'''simple docstring'''
import operator as op
UpperCamelCase_ = "scaler.pt"
UpperCamelCase_ = "pytorch_model"
UpperCamelCase_ = "random_states"
UpperCamelCase_ = "optimizer"
UpperCamelCase_ = "scheduler"
UpperCamelCase_ = "pytorch_model.bin"
UpperCamelCase_ = "pytorch_model.bin.index.json"
UpperCamelCase_ = "model.safetensors"
UpperCamelCase_ = "model.safetensors.index.json"
UpperCamelCase_ = "1.10.2"
UpperCamelCase_ = "py38"
UpperCamelCase_ = "4.17.0"
UpperCamelCase_ = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
UpperCamelCase_ = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
UpperCamelCase_ = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
UpperCamelCase_ = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
UpperCamelCase_ = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
UpperCamelCase_ = "2.0.1"
UpperCamelCase_ = ["pdsh", "standard", "openmpi", "mvapich"]
UpperCamelCase_ = ["default", "reduce-overhead", "max-autotune"]
UpperCamelCase_ = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase_ = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
UpperCamelCase_ = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
UpperCamelCase_ = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 28 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
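    # Note: instruct-pix2pix applies classifier-free guidance along two axes,
    # "guidance_scale" for the text prompt and "image_guidance_scale" for the
    # input image, which is why both knobs appear in the dummy inputs above.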
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_["""modeling_plbart"""] = [
        """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PLBartForCausalLM""",
        """PLBartForConditionalGeneration""",
        """PLBartForSequenceClassification""",
        """PLBartModel""",
        """PLBartPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], A_)
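# Illustrative sketch (added; a simplified analogue, not shipped with this module):
# the _LazyModule pattern above defers heavy imports until an attribute is accessed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first access to one of its names.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)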
| 29 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
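# Note (added): with the cosine transform, alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# and each beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta, following
# the "Improved DDPM" schedule of Nichol & Dhariwal.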
class _lowercase ( SchedulerMixin , ConfigMixin ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted DDIM, we look at the next alphas_cumprod.
        # For the final step there is no next alphas_cumprod, and the index would be out of bounds.
        # `set_alpha_to_zero` decides whether we simply set this parameter to zero
        # (in which case self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
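# Note (added): the step above runs DDIM Eq. (12) forward in time, i.e.
# x_{t+1} = sqrt(alpha_prod_{t+1}) * x0_pred + sqrt(1 - alpha_prod_{t+1}) * eps_pred,
# with x0_pred and eps_pred recovered from the model output according to `prediction_type`.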
| 696 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
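# Note (added): copying the embedding weights into a bias-free Linear ties the output
# projection to the input embeddings, the usual weight-tying trick for seq2seq LMs
# (logits = hidden_states @ embedding_matrix.T).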
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__a = parser.parse_args()
__a = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path) | 30 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class _lowercase ( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
    text_column: str = "text"
    summary_column: str = "summary"
@property
def __magic_name__( self :Union[str, Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
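# Illustrative usage (added; the column names are hypothetical): the template tells
# `datasets` which columns play the text/summary roles, e.g.
#
#     template = _lowercase(text_column='''article''', summary_column='''highlights''')
#     template.column_mapping  # {'article': 'text', 'highlights': 'summary'}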
| 696 | 0 |
import cva
import numpy as np
class HarrisCorner:
'''simple docstring'''
    def __init__( self , k : float , window_size : int ):
        # k: Harris free parameter, conventionally in [0.04, 0.06].
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value' )
def __str__( self : Dict ):
return str(self.k )
    def detect( self , img_path : str ):
        # Read the image as grayscale; keep a colour copy for drawing detected corners.
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        # Image gradients and the per-pixel products that feed the structure tensor.
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
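# Note (added): `r` above is the Harris response R = det(M) - k * trace(M)**2 of the
# windowed structure tensor M = [[wxx, wxy], [wxy, wyy]]; a large positive R marks a
# corner, a negative R an edge, and |R| near zero a flat region.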
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , _ = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
| 31 |
def solution(max_perimeter=10**9 ):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
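# Note (added; hedged): this appears to be Project Euler 94 ("almost equilateral
# triangles"): summing the perimeters of triangles with sides (a, a, a +/- 1) and
# integral area, walking the underlying Pell-equation solutions (first hits: 5-5-6
# with perimeter 16, then 17-17-16 with perimeter 50) instead of scanning every perimeter.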
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    """simple docstring"""
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
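# Note (added): each *_command_parser above registers a subcommand and attaches its
# handler to the parsed namespace (the usual argparse set_defaults(func=...) pattern),
# which is why main() can dispatch through args.func.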
if __name__ == "__main__":
    main()
| 32 |
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
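# Note (added): dp[i][j] is True iff the first i characters of `a` can be transformed
# into the first j characters of `b` by upper-casing some lower-case letters and
# deleting the remaining lower-case ones; time and space are O(len(a) * len(b)).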
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 0 |
from collections import deque
def tarjan(g):
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
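# Note (added): Tarjan's algorithm emits the strongly connected components in reverse
# topological order in O(V + E); the recursive strong_connect can hit Python's default
# recursion limit on very deep graphs.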
| 33 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int:
if return_pvalue:
__SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
| 696 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( SequenceFeatureExtractor ):
"""simple docstring"""
A_ = ['''input_features''', '''is_longer''']
def __init__( self , lowerCamelCase_=6_4 , lowerCamelCase_=4_8_0_0_0 , lowerCamelCase_=4_8_0 , lowerCamelCase_=1_0 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=0.0 , lowerCamelCase_=False , lowerCamelCase_ = 0 , lowerCamelCase_ = 1_4_0_0_0 , lowerCamelCase_ = None , lowerCamelCase_ = "fusion" , lowerCamelCase_ = "repeatpad" , **lowerCamelCase_ , ) -> Optional[Any]:
super().__init__(
feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = top_db
UpperCamelCase = truncation
UpperCamelCase = padding
UpperCamelCase = fft_window_size
UpperCamelCase = (fft_window_size >> 1) + 1
UpperCamelCase = hop_length
UpperCamelCase = max_length_s
UpperCamelCase = max_length_s * sampling_rate
UpperCamelCase = sampling_rate
UpperCamelCase = frequency_min
UpperCamelCase = frequency_max
UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm=lowerCamelCase_ , mel_scale='''htk''' , )
UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , )
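        # Note (added): two mel banks are kept deliberately -- the HTK-scaled bank above
        # (self.mel_filters) feeds the "fusion" feature path, while the Slaney-scaled bank
        # (self.mel_filters_slaney) feeds the non-fusion path used later in this class.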
def UpperCAmelCase__ ( self) -> Dict[str, Any]:
UpperCamelCase = copy.deepcopy(self.__dict__)
UpperCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> np.ndarray:
UpperCamelCase = spectrogram(
lowerCamelCase_ , window_function(self.fft_window_size , '''hann''') , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase_ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
UpperCamelCase = [0]
# randomly choose index for each part
UpperCamelCase = np.random.choice(ranges[0])
UpperCamelCase = np.random.choice(ranges[1])
UpperCamelCase = np.random.choice(ranges[2])
UpperCamelCase = mel[idx_front : idx_front + chunk_frames, :]
UpperCamelCase = mel[idx_middle : idx_middle + chunk_frames, :]
UpperCamelCase = mel[idx_back : idx_back + chunk_frames, :]
UpperCamelCase = torch.tensor(mel[None, None, :])
UpperCamelCase = torch.nn.functional.interpolate(
lowerCamelCase_ , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=lowerCamelCase_)
UpperCamelCase = mel_shrink[0][0].numpy()
UpperCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
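    # Note (added): the fused feature has 4 "channels": a bilinearly shrunk global mel
    # plus one randomly positioned chunk from the front, middle and back thirds.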
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
UpperCamelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
UpperCamelCase = len(lowerCamelCase_) - max_length
UpperCamelCase = np.random.randint(0 , overflow + 1)
UpperCamelCase = waveform[idx : idx + max_length]
UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters)
                UpperCamelCase = max_length // self.hop_length + 1  # the +1 relates to how the spectrogram is computed
UpperCamelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
UpperCamelCase = np.stack([mel, mel, mel, mel] , axis=0)
UpperCamelCase = False
else:
UpperCamelCase = self._random_mel_fusion(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented')
else:
UpperCamelCase = False
        # Only use `repeat` as a new possible value for padding: the audio is repeated
        # before the usual max_length padding is applied.
if waveform.shape[0] < max_length:
if padding == "repeat":
UpperCamelCase = int(max_length / len(lowerCamelCase_))
UpperCamelCase = np.stack(np.tile(lowerCamelCase_ , n_repeat + 1))[:max_length]
if padding == "repeatpad":
UpperCamelCase = int(max_length / len(lowerCamelCase_))
UpperCamelCase = np.stack(np.tile(lowerCamelCase_ , lowerCamelCase_))
UpperCamelCase = np.pad(lowerCamelCase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0)
if truncation == "fusion":
UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters)
UpperCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> BatchFeature:
UpperCamelCase = truncation if truncation is not None else self.truncation
UpperCamelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''')
UpperCamelCase = isinstance(lowerCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}')
UpperCamelCase = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
UpperCamelCase = [np.asarray(lowerCamelCase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray):
UpperCamelCase = np.asarray(lowerCamelCase_ , dtype=np.floataa)
elif isinstance(lowerCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
UpperCamelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
UpperCamelCase = [np.asarray(lowerCamelCase_)]
# convert to mel spectrogram, truncate and pad if needed.
UpperCamelCase = [
self._get_input_mel(lowerCamelCase_ , max_length if max_length else self.nb_max_samples , lowerCamelCase_ , lowerCamelCase_)
for waveform in raw_speech
]
UpperCamelCase = []
UpperCamelCase = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase_)
is_longer.append(lowerCamelCase_)
if truncation == "fusion" and sum(lowerCamelCase_) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
UpperCamelCase = np.random.randint(0 , len(lowerCamelCase_))
UpperCamelCase = True
if isinstance(input_mel[0] , lowerCamelCase_):
UpperCamelCase = [np.asarray(lowerCamelCase_ , dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
UpperCamelCase = [[longer] for longer in is_longer]
UpperCamelCase = {'''input_features''': input_mel, '''is_longer''': is_longer}
UpperCamelCase = BatchFeature(lowerCamelCase_)
if return_tensors is not None:
UpperCamelCase = input_features.convert_to_tensors(lowerCamelCase_)
        return input_features
| 34 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a :List[int] , token_ids_b :Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory :str , filename_prefix :Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
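# Example (added): for a pair (A, B) the tokenizer lays out [CLS] A [SEP] B [SEP],
# with token type ids 0 covering "[CLS] A [SEP]" and 1 covering "B [SEP]".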
| 696 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( ProcessorMixin ):
lowerCamelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
lowerCamelCase : List[Any] = '''ViTImageProcessor'''
lowerCamelCase : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Dict , _lowercase : Dict=None , _lowercase : List[str]=None , **_lowercase : Any ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _lowercase , )
SCREAMING_SNAKE_CASE__ : str = kwargs.pop('''feature_extractor''' )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_lowercase , _lowercase )
def __call__( self : Optional[Any] , _lowercase : List[Any]=None , _lowercase : str=None , _lowercase : int=None , _lowercase : Dict=None , **_lowercase : Dict ):
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if visual_prompt is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if images is not None:
SCREAMING_SNAKE_CASE__ : str = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if visual_prompt is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Dict = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
SCREAMING_SNAKE_CASE__ : Any = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def lowercase__ ( self : int , *_lowercase : List[Any] , **_lowercase : Optional[Any] ):
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def lowercase__ ( self : Dict , *_lowercase : Union[str, Any] , **_lowercase : Union[str, Any] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def lowercase__ ( self : int ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _lowercase , )
return self.image_processor_class
@property
def lowercase__ ( self : Optional[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _lowercase , )
return self.image_processor
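# Note (added): __call__ above supports three mutually exclusive prompt modes -- text,
# a visual prompt (conditioning image), or images alone -- and merges the prompt's
# pixel values with the target images' into a single encoding when both are provided.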
| 35 |
import os
def largest_product(grid ):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lat_product = 0
    vert_product = 0
    diag_product = 0
    anti_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            lat_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                anti_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , lat_product , diag_product , anti_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__ ) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
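# Note (added; hedged): this is the Project Euler 11 setup -- the greatest product of
# four adjacent numbers (in any direction) in the 20x20 grid read from grid.txt.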
if __name__ == "__main__":
print(solution())
| 696 | 0 |
def solution( n : int = 10 ) -> str:
    '''simple docstring'''
    if not isinstance(n , int ) or n < 0:
        raise ValueError("""Invalid input""" )
    modulus = 10**n
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )
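# Note (added): the three-argument pow(2, 7830457, modulus) performs modular
# exponentiation in O(log exponent) multiplications, so the last n digits are obtained
# without materialising the ~2.4-million-digit value of 28433 * 2**7830457 + 1.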
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
| 36 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class MobileNetVaModelTester :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
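# Illustrative sketch (added; standalone usage, not part of the test suite above):
#
#     processor = MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''')
#     model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''')
#     inputs = processor(images=prepare_img(), return_tensors='''pt''')
#     with torch.no_grad():
#         predicted_class = model(**inputs).logits.argmax(-1).item()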
| 696 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester :
"""simple docstring"""
def __init__( self : str , lowerCamelCase__ : str , lowerCamelCase__ : str=2 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : int=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : Union[str, Any]=32 * 8 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Optional[int]=64 , ):
a__ : List[str] = parent
a__ : List[Any] = batch_size
a__ : Any = is_training
a__ : Optional[Any] = use_auxiliary_loss
a__ : Dict = num_queries
a__ : Any = num_channels
a__ : List[Any] = min_size
a__ : Dict = max_size
a__ : Dict = num_labels
a__ : str = hidden_dim
a__ : int = hidden_dim
def _UpperCamelCase( self : Union[str, Any] ):
a__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
a__ : List[str] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
a__ : List[str] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
a__ : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
a__ : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCamelCase( self : Union[str, Any] ):
a__ : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
a__ : str = self.num_queries
a__ : Tuple = self.num_labels
a__ : Optional[int] = [1, 1, 1, 1]
a__ : Dict = self.num_channels
a__ : int = 64
a__ : Optional[Any] = 128
a__ : Optional[int] = self.hidden_dim
a__ : Dict = self.hidden_dim
a__ : Optional[Any] = self.hidden_dim
return config
def _UpperCamelCase( self : Optional[Any] ):
a__, a__, a__, a__, a__ : Union[str, Any] = self.prepare_config_and_inputs()
a__ : Dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ):
a__ : Any = output.encoder_hidden_states
a__ : Optional[Any] = output.pixel_decoder_hidden_states
a__ : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=False ):
with torch.no_grad():
a__ : List[str] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
a__ : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict ):
a__ : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
a__ : Tuple = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
a__ : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
a__ : Optional[Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowercase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowercase = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _UpperCamelCase( self : str ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _UpperCamelCase( self : Optional[Any] ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _UpperCamelCase( self : Dict ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _UpperCamelCase( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase( self : Tuple ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase( self : int ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
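    # Hedged usage sketch (not part of the original tests): Mask2Former predictions are
    # typically decoded with the image processor's post-processing step; the call below
    # is illustrative and reuses `image_processor`, `outputs`, and `image` from a run
    # like `test_inference_universal_segmentation_head` above.
    #
    #   segmentation_map = image_processor.post_process_semantic_segmentation(
    #       outputs, target_sizes=[image.size[::-1]]
    #   )[0]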
| 37 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Posts a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
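# Hypothetical invocation (assumes PyGithub is installed and the script is saved as
# `close_stale_issues.py`; the `GITHUB_TOKEN` variable name matches the lookup above):
#
#   GITHUB_TOKEN=<personal-access-token> python close_stale_issues.py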
| 696 | 0 |
'''simple docstring'''
from math import factorial
def solution(n: int = 100) -> int:
    """Return the sum of the digits of n! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(n)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
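# Quick sanity check (illustrative): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#
#   assert solution(10) == 27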
| 38 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16_384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16_384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
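# Minimal usage sketch (illustrative, not from the original file): `CanineModel` is
# assumed to be the matching model class exported by transformers.
#
#   from transformers import CanineConfig, CanineModel
#
#   config = CanineConfig(num_hidden_layers=6)  # smaller than the default 12, for illustration
#   model = CanineModel(config)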
| 696 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 39 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=267_735, cutoffs=[20_000, 40_000, 200_000], d_model=1_024, d_embed=1_024, n_head=16, d_head=64, d_inner=4_096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1_600, clamp_len=1_000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
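# Minimal usage sketch (illustrative, not from the original file): `TransfoXLModel` is
# assumed to be the matching model class exported by transformers.
#
#   from transformers import TransfoXLConfig, TransfoXLModel
#
#   config = TransfoXLConfig(mem_len=800)  # shorter memory than the default 1600, for illustration
#   model = TransfoXLModel(config)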
| 696 | 0 |