code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Any:
'''simple docstring'''
if index == r:
for j in range(lowerCAmelCase__ ):
print(data[j] ,end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_UpperCamelCase : Tuple = arr[i]
combination_util(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,index + 1 ,lowerCAmelCase__ ,i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
# A temporary array to store all combination one by one
_UpperCamelCase : Optional[Any] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,0 ,lowerCAmelCase__ ,0 )
if __name__ == "__main__":
# Driver code to check the function above
lowerCAmelCase_ : int = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 435 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = 'dpr'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=0 , _a="absolute" , _a = 0 , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = projection_dim
__a = position_embedding_type
| 695 | 0 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
snake_case_ : Optional[Any] = datasets.utils.logging.get_logger(__name__)
class A_ ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = None
class A_ ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
_lowerCAmelCase = datasets.Audio()
_lowerCAmelCase = 'audio'
_lowerCAmelCase = AudioFolderConfig
_lowerCAmelCase = 42 # definition at the bottom of the script
_lowerCAmelCase = AudioClassification(audio_column="""audio""" , label_column="""label""" )
snake_case_ : Dict = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
snake_case_ : Dict = AUDIO_EXTENSIONS
| 138 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = StableDiffusionInpaintPipeline
__UpperCAmelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCAmelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCAmelCase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : Tuple = frozenset([] )
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , )
__a = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
__a = CLIPTextModel(_a )
__a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __UpperCAmelCase ( self , _a , _a=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((64, 64) )
__a = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_a ).startswith('''mps''' ):
__a = torch.manual_seed(_a )
else:
__a = torch.Generator(device=_a ).manual_seed(_a )
__a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCAmelCase ( self ):
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableDiffusionInpaintPipeline(**_a )
__a = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
__a = self.get_dummy_inputs(_a )
__a = sd_pipe(**_a ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = StableDiffusionInpaintPipeline.from_pretrained(
_a , torch_dtype=torch.floataa , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCAmelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = PNDMScheduler.from_pretrained(_a , subfolder='''scheduler''' )
__a = StableDiffusionInpaintPipeline.from_pretrained(
_a , safety_checker=_a , scheduler=_a , torch_dtype=torch.floataa , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type='''np''' , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 695 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE, unittest.TestCase ):
'''simple docstring'''
lowercase : Dict = FunnelTokenizer
lowercase : List[str] = FunnelTokenizerFast
lowercase : str = True
lowercase : Optional[int] = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE =[
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_a )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE ='''unwanted, running'''
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
_SCREAMING_SNAKE_CASE =tokenizer('''UNwant\u00E9d,running''' )
_SCREAMING_SNAKE_CASE =len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
_SCREAMING_SNAKE_CASE =tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 255 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : bool = False
__UpperCAmelCase : float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
__a = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
__a = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCAmelCase ( self ):
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
lowercase_ = Accelerator(kwargs_handlers=[ddp_scaler])
lowercase_ = torch.nn.Linear(1_0_0, 2_0_0)
lowercase_ = accelerator.prepare(model)
# Check the values changed in kwargs
lowercase_ = ""
lowercase_ = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 695 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase : List[str] = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 116 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__a = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __UpperCAmelCase ( self ):
__a = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
__a = [sys.executable] + distributed_args
execute_subprocess_async(_a , env=os.environ.copy() )
| 695 | 0 |
"""simple docstring"""
snake_case_ : int = {str(digit): digit**5 for digit in range(1_0)}
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCAmelCase__ ) )
def lowercase_ ( ):
'''simple docstring'''
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(lowerCAmelCase__ ) )
if __name__ == "__main__":
print(solution())
| 595 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = BertTokenizer
__UpperCAmelCase : Optional[Any] = BertTokenizerFast
__UpperCAmelCase : str = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Any = filter_non_english
def __UpperCAmelCase ( self ):
super().setUp()
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , _a ):
__a = '''UNwant\u00E9d,running'''
__a = '''unwanted, running'''
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file )
__a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
__a = self.get_tokenizer(do_lower_case=_a )
__a = self.get_rust_tokenizer(do_lower_case=_a )
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
__a = '''a\n\'ll !!to?\'d of, can\'t.'''
__a = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__a = {}
for i, token in enumerate(_a ):
__a = i
__a = WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__a = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
__a = tokenizer_r.do_lower_case if hasattr(_a , '''do_lower_case''' ) else False
__a = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __UpperCAmelCase ( self ):
__a = ['''的''', '''人''', '''有''']
__a = ''''''.join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = True
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
__a = False
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
__a = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 695 | 0 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
_snake_case = logging.get_logger(__name__)
class lowercase ( __SCREAMING_SNAKE_CASE ):
_a = CLIPConfig
_a = ['CLIPEncoderLayer']
def __init__( self , _a ) -> Tuple:
super().__init__(_a )
_A : List[Any] = CLIPVisionModelWithProjection(config.vision_config )
_A : Optional[Any] = nn.Linear(config.vision_config.projection_dim , 1 )
_A : Optional[int] = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def a__ ( self , _a , _a , _a=0.5 , _a=0.5 ) -> Union[str, Any]:
_A : Optional[int] = self.vision_model(_a )[0]
_A : Optional[int] = self.p_head(_a )
_A : Tuple = nsfw_detected.flatten()
_A : int = nsfw_detected > p_threshold
_A : Any = nsfw_detected.tolist()
if any(_a ):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, nsfw_detected_ in enumerate(_a ):
if nsfw_detected_:
_A : Any = np.zeros(images[idx].shape )
_A : List[Any] = self.w_head(_a )
_A : Optional[Any] = watermark_detected.flatten()
_A : Optional[int] = watermark_detected > w_threshold
_A : str = watermark_detected.tolist()
if any(_a ):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, watermark_detected_ in enumerate(_a ):
if watermark_detected_:
_A : Dict = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 307 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , ) -> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
lowerCAmelCase__ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE : jnp.ndarray
__SCREAMING_SNAKE_CASE : jnp.ndarray
class _UpperCAmelCase ( nn.Module ):
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def a_ ( self ) -> int:
UpperCAmelCase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCAmelCase = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCAmelCase = self.block_out_channels[i]
UpperCAmelCase = self.block_out_channels[i + 1]
UpperCAmelCase = nn.Conv(
_a , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_a )
UpperCAmelCase = nn.Conv(
_a , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_a )
UpperCAmelCase = blocks
UpperCAmelCase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = self.conv_in(_a )
UpperCAmelCase = nn.silu(_a )
for block in self.blocks:
UpperCAmelCase = block(_a )
UpperCAmelCase = nn.silu(_a )
UpperCAmelCase = self.conv_out(_a )
return embedding
@flax_register_to_config
class _UpperCAmelCase ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE : int = 3_2
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1_2_8_0
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : str = "rgb"
__SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def a_ ( self , lowercase_ ) -> Any:
# init input tensors
UpperCAmelCase = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCAmelCase = jnp.zeros(_a , dtype=jnp.floataa )
UpperCAmelCase = jnp.ones((1,) , dtype=jnp.intaa )
UpperCAmelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
UpperCAmelCase = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCAmelCase = jnp.zeros(_a , dtype=jnp.floataa )
UpperCAmelCase , UpperCAmelCase = jax.random.split(_a )
UpperCAmelCase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(_a , _a , _a , _a , _a )["params"]
def a_ ( self ) -> Any:
UpperCAmelCase = self.block_out_channels
UpperCAmelCase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCAmelCase = self.num_attention_heads or self.attention_head_dim
# input
UpperCAmelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCAmelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCAmelCase = FlaxTimestepEmbedding(_a , dtype=self.dtype )
UpperCAmelCase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
UpperCAmelCase = self.only_cross_attention
if isinstance(_a , _a ):
UpperCAmelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_a , _a ):
UpperCAmelCase = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = block_out_channels[0]
UpperCAmelCase = nn.Conv(
_a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCAmelCase = output_channel
UpperCAmelCase = block_out_channels[i]
UpperCAmelCase = i == len(_a ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCAmelCase = FlaxCrossAttnDownBlockaD(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
UpperCAmelCase = FlaxDownBlockaD(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_a )
for _ in range(self.layers_per_block ):
UpperCAmelCase = nn.Conv(
_a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
if not is_final_block:
UpperCAmelCase = nn.Conv(
_a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_a )
UpperCAmelCase = down_blocks
UpperCAmelCase = controlnet_down_blocks
# mid
UpperCAmelCase = block_out_channels[-1]
UpperCAmelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=_a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
UpperCAmelCase = nn.Conv(
_a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1.0 , lowercase_ = True , lowercase_ = False , ) -> Tuple:
UpperCAmelCase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCAmelCase = jnp.flip(_a , axis=1 )
# 1. time
if not isinstance(_a , jnp.ndarray ):
UpperCAmelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_a , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCAmelCase = timesteps.astype(dtype=jnp.floataa )
UpperCAmelCase = jnp.expand_dims(_a , 0 )
UpperCAmelCase = self.time_proj(_a )
UpperCAmelCase = self.time_embedding(_a )
# 2. pre-process
UpperCAmelCase = jnp.transpose(_a , (0, 2, 3, 1) )
UpperCAmelCase = self.conv_in(_a )
UpperCAmelCase = jnp.transpose(_a , (0, 2, 3, 1) )
UpperCAmelCase = self.controlnet_cond_embedding(_a )
sample += controlnet_cond
# 3. down
UpperCAmelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(_a , _a ):
UpperCAmelCase , UpperCAmelCase = down_block(_a , _a , _a , deterministic=not train )
else:
UpperCAmelCase , UpperCAmelCase = down_block(_a , _a , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCAmelCase = self.mid_block(_a , _a , _a , deterministic=not train )
# 5. contronet blocks
UpperCAmelCase = ()
for down_block_res_sample, controlnet_block in zip(_a , self.controlnet_down_blocks ):
UpperCAmelCase = controlnet_block(_a )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCAmelCase = controlnet_down_block_res_samples
UpperCAmelCase = self.controlnet_mid_block(_a )
# 6. scaling
UpperCAmelCase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_a , mid_block_res_sample=_a )
| 373 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> Any:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = len(set_a.intersection(lowerCAmelCase__ ) )
if alternative_union:
__a = len(lowerCAmelCase__ ) + len(lowerCAmelCase__ )
else:
__a = len(set_a.union(lowerCAmelCase__ ) )
return intersection / union
if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(lowerCAmelCase__ , (list, tuple) ):
__a = [element for element in set_a if element in set_b]
if alternative_union:
__a = len(lowerCAmelCase__ ) + len(lowerCAmelCase__ )
return len(lowerCAmelCase__ ) / union
else:
__a = set_a + [element for element in set_b if element not in set_a]
return len(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
return len(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
return None
if __name__ == "__main__":
lowercase_ = {"a", "b", "c", "d", "e"}
lowercase_ = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 695 | 0 |
'''simple docstring'''
def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) != 0 )
def UpperCAmelCase ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 334 |
"""simple docstring"""
from __future__ import annotations
import requests
def lowercase ( lowerCAmelCase__ : str ) -> dict:
__a = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(lowerCAmelCase__ ).json()
def lowercase ( lowerCAmelCase__ : int = 10 ) -> list[dict]:
__a = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
__a = requests.get(lowerCAmelCase__ ).json()[:max_stories]
return [get_hackernews_story(lowerCAmelCase__ ) for story_id in story_ids]
def lowercase ( lowerCAmelCase__ : int = 10 ) -> str:
__a = hackernews_top_stories(lowerCAmelCase__ )
return "\n".join('''* [{title}]({url})'''.format(**lowerCAmelCase__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 695 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
__lowercase = 'deformable_detr'
__lowercase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self :str , lowercase_ :Tuple=True , lowercase_ :Any=None , lowercase_ :Dict=3 , lowercase_ :Dict=3_00 , lowercase_ :str=10_24 , lowercase_ :Any=6 , lowercase_ :Dict=10_24 , lowercase_ :List[Any]=8 , lowercase_ :Dict=6 , lowercase_ :Union[str, Any]=10_24 , lowercase_ :int=8 , lowercase_ :Any=0.0 , lowercase_ :Any=True , lowercase_ :int="relu" , lowercase_ :Tuple=2_56 , lowercase_ :str=0.1 , lowercase_ :int=0.0 , lowercase_ :Tuple=0.0 , lowercase_ :Optional[int]=0.0_2 , lowercase_ :int=1.0 , lowercase_ :Optional[int]=True , lowercase_ :Dict=False , lowercase_ :List[str]="sine" , lowercase_ :Optional[Any]="resnet50" , lowercase_ :Any=True , lowercase_ :Union[str, Any]=False , lowercase_ :List[Any]=4 , lowercase_ :List[str]=4 , lowercase_ :Optional[Any]=4 , lowercase_ :Dict=False , lowercase_ :List[str]=3_00 , lowercase_ :int=False , lowercase_ :Tuple=1 , lowercase_ :Optional[int]=5 , lowercase_ :Optional[Any]=2 , lowercase_ :Union[str, Any]=1 , lowercase_ :List[Any]=1 , lowercase_ :List[Any]=5 , lowercase_ :int=2 , lowercase_ :Tuple=0.1 , lowercase_ :Dict=0.2_5 , lowercase_ :int=False , **lowercase_ :List[str] , )-> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
A__ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(_a , _a ):
A__ = backbone_config.get("model_type" )
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(_a )
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = max_position_embeddings
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# deformable attributes
A__ = num_feature_levels
A__ = encoder_n_points
A__ = decoder_n_points
A__ = two_stage
A__ = two_stage_num_proposals
A__ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = eos_coefficient
A__ = focal_alpha
A__ = disable_custom_kernels
super().__init__(is_encoder_decoder=_a , **_a )
@property
def UpperCAmelCase_ ( self :Optional[int] )-> Tuple:
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self :int )-> Optional[int]:
return self.d_model
def UpperCAmelCase_ ( self :int )-> List[Any]:
A__ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ = self.backbone_config.to_dict()
A__ = self.__class__.model_type
return output
| 440 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'blip_2_vision_model'
def __init__( self , _a=1_408 , _a=6_144 , _a=39 , _a=16 , _a=224 , _a=14 , _a="gelu" , _a=0.0_0001 , _a=0.0 , _a=1E-10 , _a=True , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = patch_size
__a = image_size
__a = initializer_range
__a = attention_dropout
__a = layer_norm_eps
__a = hidden_act
__a = qkv_bias
@classmethod
def __UpperCAmelCase ( cls , _a , **_a ):
cls._set_token_in_kwargs(_a )
__a , __a = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
__a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'blip_2_qformer'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=0.02 , _a=1E-12 , _a=0 , _a="absolute" , _a=2 , _a=1_408 , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = cross_attention_frequency
__a = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls , _a , **_a ):
cls._set_token_in_kwargs(_a )
__a , __a = cls.get_config_dict(_a , **_a )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
__a = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'blip-2'
__UpperCAmelCase : List[str] = True
def __init__( self , _a=None , _a=None , _a=None , _a=32 , **_a ):
super().__init__(**_a )
if vision_config is None:
__a = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
__a = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
__a = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
__a = BlipaVisionConfig(**_a )
__a = BlipaQFormerConfig(**_a )
__a = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__a = CONFIG_MAPPING[text_model_type](**_a )
__a = self.text_config.tie_word_embeddings
__a = self.text_config.is_encoder_decoder
__a = num_query_tokens
__a = self.vision_config.hidden_size
__a = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__a = 1.0
__a = 0.02
@classmethod
def __UpperCAmelCase ( cls , _a , _a , _a , **_a , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_a , )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.vision_config.to_dict()
__a = self.qformer_config.to_dict()
__a = self.text_config.to_dict()
__a = self.__class__.model_type
return output
| 695 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : List[Any] = {'''vocab_file''': '''spiece.model'''}
_UpperCAmelCase : int = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
_UpperCAmelCase : Union[str, Any] = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : List[Any] = 2
_UpperCAmelCase : Optional[Any] = 3
_UpperCAmelCase : Dict = 4
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = 'left'
def __init__( self : List[str], UpperCamelCase__ : List[str], UpperCamelCase__ : Any=False, UpperCamelCase__ : Tuple=True, UpperCamelCase__ : Optional[int]=False, UpperCamelCase__ : Union[str, Any]="<s>", UpperCamelCase__ : Optional[Any]="</s>", UpperCamelCase__ : str="<unk>", UpperCamelCase__ : List[str]="<sep>", UpperCamelCase__ : str="<pad>", UpperCamelCase__ : Optional[Any]="<cls>", UpperCamelCase__ : Union[str, Any]="<mask>", UpperCamelCase__ : str=["<eop>", "<eod>"], UpperCamelCase__ : Union[str, Any] = None, **UpperCamelCase__ : List[str], ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(_a, lstrip=_a, rstrip=_a ) if isinstance(_a, _a ) else mask_token
_A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a, remove_space=_a, keep_accents=_a, bos_token=_a, eos_token=_a, unk_token=_a, sep_token=_a, pad_token=_a, cls_token=_a, mask_token=_a, additional_special_tokens=_a, sp_model_kwargs=self.sp_model_kwargs, **_a, )
_A = 3
_A = do_lower_case
_A = remove_space
_A = keep_accents
_A = vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __UpperCAmelCase ( self : Tuple ) -> int:
return len(self.sp_model )
def __UpperCAmelCase ( self : Any ) -> List[str]:
_A = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> Union[str, Any]:
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : List[str], UpperCamelCase__ : Optional[Any] ) -> Tuple:
_A = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : str ) -> Optional[Any]:
if self.remove_space:
_A = ' '.join(inputs.strip().split() )
else:
_A = inputs
_A = outputs.replace('``', '"' ).replace('\'\'', '"' )
if not self.keep_accents:
_A = unicodedata.normalize('NFKD', _a )
_A = ''.join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
_A = outputs.lower()
return outputs
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : Optional[Any] ) -> Any:
_A = self.preprocess_text(_a )
_A = self.sp_model.encode(_a, out_type=_a )
_A = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_A = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a, '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_A = cur_pieces[1:]
else:
_A = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def __UpperCAmelCase ( self : Any, UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
return self.sp_model.PieceToId(_a )
def __UpperCAmelCase ( self : Any, UpperCamelCase__ : Any ) -> List[str]:
return self.sp_model.IdToPiece(_a )
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : int ) -> int:
_A = ''.join(_a ).replace(_a, ' ' ).strip()
return out_string
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : int, UpperCamelCase__ : str = False, UpperCamelCase__ : List[str] = None, UpperCamelCase__ : Tuple = True, **UpperCamelCase__ : Any, ) -> Tuple:
_A = kwargs.pop('use_source_tokenizer', _a )
_A = self.convert_ids_to_tokens(_a, skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_A = []
_A = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
_A = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_A = ''.join(_a )
_A = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_A = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[int] = None ) -> Tuple:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Tuple = None, UpperCamelCase__ : Optional[int] = False ) -> str:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a, token_ids_a=_a, already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def __UpperCAmelCase ( self : Optional[int], UpperCamelCase__ : int, UpperCamelCase__ : List[str] = None ) -> Union[str, Any]:
_A = [self.sep_token_id]
_A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __UpperCAmelCase ( self : str, UpperCamelCase__ : Any, UpperCamelCase__ : List[Any] = None ) -> Tuple:
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_A = os.path.join(
_a, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a, 'wb' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 107 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = 'deberta-v2'
def __init__( self , _a=128_100 , _a=1_536 , _a=24 , _a=24 , _a=6_144 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=0 , _a=0.02 , _a=1E-7 , _a=False , _a=-1 , _a=0 , _a=True , _a=None , _a=0 , _a="gelu" , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = relative_attention
__a = max_relative_positions
__a = pad_token_id
__a = position_biased_input
# Backwards compatibility
if type(_a ) == str:
__a = [x.strip() for x in pos_att_type.lower().split('''|''' )]
__a = pos_att_type
__a = vocab_size
__a = layer_norm_eps
__a = kwargs.get('''pooler_hidden_size''' , _a )
__a = pooler_dropout
__a = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __UpperCAmelCase ( self ):
return 12
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = -1 , _a = False , _a = None , _a = 3 , _a = 40 , _a = 40 , _a = None , ):
__a = super().generate_dummy_inputs(preprocessor=_a , framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 695 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a_ : Any = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
a_ : str = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n'
a_ : Tuple = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
a_ : Dict = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n'
a_ : List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> List[str]:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 1_0, 1_0_0] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0 ) -> int:
if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('''This metric is currently not supported on Windows.''' )
with ThreadPoolExecutor(max_workers=_a ) as executor:
a__ = []
a__ = Counter()
a__ = 0
a__ = defaultdict(_a )
for task_id, (candidates, test_case) in enumerate(zip(_a , _a ) ):
for candidate in candidates:
a__ = candidate + '''\n''' + test_case
a__ = (test_program, timeout, task_id, completion_id[task_id])
a__ = executor.submit(_a , *_a )
futures.append(_a )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_a ):
a__ = future.result()
results[result["task_id"]].append((result['''completion_id'''], result) )
a__ , a__ = [], []
for result in results.values():
result.sort()
a__ = [r[1]['''passed'''] for r in result]
total.append(len(_a ) )
correct.append(sum(_a ) )
a__ = np.array(_a )
a__ = np.array(_a )
a__ = k
a__ = {f"pass@{k}": estimate_pass_at_k(_a , _a , _a ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def estimator(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a__ = itertools.repeat(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
else:
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
a__ = iter(lowerCAmelCase__ )
return np.array([estimator(int(lowerCAmelCase__ ) , int(lowerCAmelCase__ ) , lowerCAmelCase__ ) for n, c in zip(lowerCAmelCase__ , lowerCAmelCase__ )] )
| 194 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None:
__a = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ):
__a , __a , __a = requirement, None, None
else:
__a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
__a , __a = match[0]
__a = want_full.split(''',''' ) # there could be multiple requirements
__a = {}
for w in want_range:
__a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
__a , __a = match[0]
__a = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return
# check if any version is installed
try:
__a = importlib.metadata.version(lowerCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
__a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __A ( UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if isinstance(lowerCAmelCase__ ,collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def snake_case__ ( self : Optional[int] , lowercase__ : List[Any] , lowercase__ : Any ) ->Dict:
'''simple docstring'''
pass
def snake_case__ ( self : str ) ->Tuple:
'''simple docstring'''
pass
def snake_case__ ( self : Any ) ->List[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : List[str] , lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : int , lowercase__ : Optional[Any]=None , **lowercase__ : List[Any] ) ->int:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_a , _a )
_UpperCamelCase : Union[str, Any] = TFVisionTextDualEncoderModel(_a )
_UpperCamelCase : str = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def snake_case__ ( self : Optional[int] , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : List[str]=None , **lowercase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Dict = self.get_vision_text_model(_a , _a )
_UpperCamelCase : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a )
_UpperCamelCase : Tuple = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : str , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : int , lowercase__ : List[Any] , lowercase__ : int=None , **lowercase__ : Tuple ) ->Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : str = self.get_vision_text_model(_a , _a )
_UpperCamelCase : List[Any] = {"vision_model": vision_model, "text_model": text_model}
_UpperCamelCase : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_a )
_UpperCamelCase : str = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def snake_case__ ( self : Optional[int] , lowercase__ : List[str] , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : Any , lowercase__ : List[str]=None , **lowercase__ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : List[str] = self.get_vision_text_model(_a , _a )
_UpperCamelCase : List[str] = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a )
_UpperCamelCase : Dict = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
_UpperCamelCase : str = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
_UpperCamelCase : Dict = TFVisionTextDualEncoderModel.from_pretrained(_a )
_UpperCamelCase : Tuple = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
_UpperCamelCase : List[Any] = after_output[0].numpy()
_UpperCamelCase : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1e-5 )
def snake_case__ ( self : List[Any] , lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int , lowercase__ : Optional[Any]=None , **lowercase__ : Dict ) ->Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : str = self.get_vision_text_model(_a , _a )
_UpperCamelCase : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a )
_UpperCamelCase : Optional[Any] = model(
input_ids=_a , pixel_values=_a , attention_mask=_a , output_attentions=_a )
_UpperCamelCase : List[Any] = output.vision_model_output.attentions
self.assertEqual(len(_a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase : List[str] = to_atuple(vision_model.config.image_size )
_UpperCamelCase : Union[str, Any] = to_atuple(vision_model.config.patch_size )
_UpperCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCamelCase : Optional[int] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCamelCase : Dict = output.text_model_output.attentions
self.assertEqual(len(_a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : Union[str, Any] , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : str ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : Dict = np.abs((a - b) ).max()
self.assertLessEqual(_a , _a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def snake_case__ ( self : int ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_a )
def snake_case__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_a )
def snake_case__ ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_a )
def snake_case__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : int = self.prepare_config_and_inputs()
self.check_save_load(**_a )
def snake_case__ ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase : Any = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_a )
@slow
def snake_case__ ( self : Dict ) ->Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Dict = self.get_pretrained_model_and_inputs()
_UpperCamelCase : str = model_a(**_a )
_UpperCamelCase : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_a )
_UpperCamelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(_a )
_UpperCamelCase : Dict = model_a(**_a )
_UpperCamelCase : List[str] = after_outputs[0].numpy()
_UpperCamelCase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1e-5 )
@require_tf
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
_UpperCamelCase : List[str] = 13
_UpperCamelCase : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCamelCase : Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCamelCase : Union[str, Any] = random_attention_mask([batch_size, 4] )
_UpperCamelCase : Dict = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self : Union[str, Any] , lowercase__ : int , lowercase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = TFViTModel(_a , name="vision_model" )
_UpperCamelCase : List[str] = TFBertModel(_a , name="text_model" )
return vision_model, text_model
def snake_case__ ( self : Any ) ->str:
'''simple docstring'''
_UpperCamelCase : List[Any] = TFViTModelTester(self )
_UpperCamelCase : str = TFBertModelTester(self )
_UpperCamelCase : Optional[int] = vit_model_tester.prepare_config_and_inputs()
_UpperCamelCase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[Any] = vision_config_and_inputs
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) : int = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCamelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
_UpperCamelCase : str = 13
_UpperCamelCase : Dict = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCamelCase : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCamelCase : Union[str, Any] = random_attention_mask([batch_size, 4] )
_UpperCamelCase : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self : Any , lowercase__ : Tuple , lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : str=None , **lowercase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Optional[Any] = self.get_vision_text_model(_a , _a )
_UpperCamelCase : List[str] = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a )
_UpperCamelCase : Dict = model(
input_ids=_a , pixel_values=_a , attention_mask=_a , output_attentions=_a )
_UpperCamelCase : List[str] = output.vision_model_output.attentions
self.assertEqual(len(_a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCamelCase : List[str] = to_atuple(vision_model.config.image_size )
_UpperCamelCase : List[Any] = to_atuple(vision_model.config.patch_size )
_UpperCamelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCamelCase : Optional[int] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCamelCase : int = output.text_model_output.attentions
self.assertEqual(len(_a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def snake_case__ ( self : Any , lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = TFDeiTModel(_a , name="vision_model" )
_UpperCamelCase : Optional[Any] = TFRobertaModel(_a , name="text_model" )
return vision_model, text_model
def snake_case__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCamelCase : Dict = TFDeiTModelTester(self )
_UpperCamelCase : str = TFRobertaModelTester(self )
_UpperCamelCase : Tuple = vit_model_tester.prepare_config_and_inputs()
_UpperCamelCase : Any = bert_model_tester.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[Any] = vision_config_and_inputs
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
_UpperCamelCase : Union[str, Any] = 13
_UpperCamelCase : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCamelCase : Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCamelCase : Optional[int] = random_attention_mask([batch_size, 4] )
_UpperCamelCase : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def snake_case__ ( self : Dict , lowercase__ : Optional[int] , lowercase__ : Any ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase : Tuple = TFCLIPVisionModel(_a , name="vision_model" )
_UpperCamelCase : str = TFBertModel(_a , name="text_model" )
return vision_model, text_model
def snake_case__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : Dict = TFCLIPVisionModelTester(self )
_UpperCamelCase : Tuple = TFBertModelTester(self )
_UpperCamelCase : Tuple = clip_model_tester.prepare_config_and_inputs()
_UpperCamelCase : List[Any] = bert_model_tester.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase : Optional[int] = vision_config_and_inputs
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) : List[str] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self : Tuple ) ->Dict:
'''simple docstring'''
_UpperCamelCase : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_a )
_UpperCamelCase : Dict = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
_UpperCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCamelCase : List[Any] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_a , padding=_a , return_tensors="np" )
_UpperCamelCase : List[Any] = model(**_a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCamelCase : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _a , atol=1e-3 ) )
| 435 |
"""simple docstring"""
from __future__ import annotations
lowercase_ = list[tuple[int, int]]
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , ):
__a = pos_x
__a = pos_y
__a = (pos_y, pos_x)
__a = goal_x
__a = goal_y
__a = g_cost
__a = parent
__a = self.calculate_heuristic()
def __UpperCAmelCase ( self ):
__a = abs(self.pos_x - self.goal_x )
__a = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _a ):
return self.f_cost < other.f_cost
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
__a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
__a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _a )
__a = [self.start]
__a = []
__a = False
def __UpperCAmelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__a = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__a = True
return self.retrace_path(_a )
self.closed_nodes.append(_a )
__a = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
__a = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
if not self.reached:
return [self.start.pos]
return None
def __UpperCAmelCase ( self , _a ):
__a = []
for action in delta:
__a = parent.pos_x + action[1]
__a = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def __UpperCAmelCase ( self , _a ):
__a = node
__a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__a = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowercase_ = (0, 0)
lowercase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase_ = GreedyBestFirst(init, goal)
lowercase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase_ = 2
for elem in grid:
print(elem)
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class A_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
def a ( self , A_ , A_ , A_=False ):
_UpperCamelCase = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class in get_values(_a ):
_UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class A_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = embedding_size
def a ( self ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
_UpperCamelCase = TFMobileBertModel(config=_a )
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(_a )
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_a )
_UpperCamelCase = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
_UpperCamelCase = TFMobileBertForMaskedLM(config=_a )
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
_UpperCamelCase = TFMobileBertForNextSentencePrediction(config=_a )
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
_UpperCamelCase = TFMobileBertForPreTraining(config=_a )
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(_a )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFMobileBertForSequenceClassification(config=_a )
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = TFMobileBertForMultipleChoice(config=_a )
_UpperCamelCase = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_UpperCamelCase = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_UpperCamelCase = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFMobileBertForTokenClassification(config=_a )
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
_UpperCamelCase = TFMobileBertForQuestionAnswering(config=_a )
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_UpperCamelCase = model(_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def a ( self ):
_UpperCamelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_a , hidden_size=37 )
def a ( self ):
self.config_tester.run_common_tests()
def a ( self ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_a )
def a ( self ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_a )
def a ( self ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_a )
def a ( self ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_a )
def a ( self ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_a )
def a ( self ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_a )
def a ( self ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_a )
def a ( self ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_a )
@slow
def a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCamelCase = TFMobileBertModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a ( self ):
_UpperCamelCase = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
_UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCamelCase = model(_a )[0]
_UpperCamelCase = [1, 6, 3_05_22]
self.assertEqual(output.shape , _a )
_UpperCamelCase = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-4 )
| 138 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str ) -> List[Any]:
# Initialise PyTorch model
__a = RemBertConfig.from_json_file(lowerCAmelCase__ )
print('''Building PyTorch model from configuration: {}'''.format(str(lowerCAmelCase__ ) ) )
__a = RemBertModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(lowerCAmelCase__ ) )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 695 | 0 |
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE, unittest.TestCase ):
'''simple docstring'''
lowercase : Union[str, Any] = GPTSwaTokenizer
lowercase : Tuple = False
lowercase : List[Any] = True
lowercase : Optional[Any] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_SCREAMING_SNAKE_CASE =GPTSwaTokenizer(_a , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''This is a test'''
_SCREAMING_SNAKE_CASE ='''This is a test'''
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='''<s>'''
_SCREAMING_SNAKE_CASE =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 2_0_0_0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =GPTSwaTokenizer(_a )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
_a , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
_SCREAMING_SNAKE_CASE =tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
_SCREAMING_SNAKE_CASE =tokenizer.convert_ids_to_tokens(_a )
# fmt: off
self.assertListEqual(
_a , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =GPTSwaTokenizer(_a )
_SCREAMING_SNAKE_CASE =['''This is a test''', '''I was born in 92000, and this is falsé.''']
_SCREAMING_SNAKE_CASE =[
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_a , _a ):
self.assertListEqual(tokenizer.encode_fast(_a ) , _a )
# Test that decode_fast returns the input text
for text, token_ids in zip(_a , _a ):
self.assertEqual(tokenizer.decode_fast(_a ) , _a )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
_SCREAMING_SNAKE_CASE ={'''input_ids''': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=_a , )
| 255 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id='''test-model-flax''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Optional[int]:
__a = True
__a = flatten_dict(modela.params )
__a = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
__a = False
return models_are_equal
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size='''10KB''' )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
| 695 | 0 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _a (__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,__a ,__a ,__a ,__a ,) -> List[str]:
super().__init__()
snake_case : Optional[int] = value_function
snake_case : List[str] = unet
snake_case : Union[str, Any] = scheduler
snake_case : str = env
snake_case : Dict = env.get_dataset()
snake_case : str = {}
for key in self.data.keys():
try:
snake_case : Dict = self.data[key].mean()
except: # noqa: E722
pass
snake_case : Dict = {}
for key in self.data.keys():
try:
snake_case : Optional[Any] = self.data[key].std()
except: # noqa: E722
pass
snake_case : int = env.observation_space.shape[0]
snake_case : Dict = env.action_space.shape[0]
def snake_case_ ( self ,__a ,__a ) -> List[Any]:
return (x_in - self.means[key]) / self.stds[key]
def snake_case_ ( self ,__a ,__a ) -> List[Any]:
return x_in * self.stds[key] + self.means[key]
def snake_case_ ( self ,__a ) -> Union[str, Any]:
if type(_a ) is dict:
return {k: self.to_torch(_a ) for k, v in x_in.items()}
elif torch.is_tensor(_a ):
return x_in.to(self.unet.device )
return torch.tensor(_a ,device=self.unet.device )
def snake_case_ ( self ,__a ,__a ,__a ) -> int:
for key, val in cond.items():
snake_case : Tuple = val.clone()
return x_in
def snake_case_ ( self ,__a ,__a ,__a ,__a ) -> Optional[int]:
snake_case : Optional[int] = x.shape[0]
snake_case : int = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
snake_case : str = torch.full((batch_size,) ,_a ,device=self.unet.device ,dtype=torch.long )
for _ in range(_a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
snake_case : List[str] = self.value_function(x.permute(0 ,2 ,1 ) ,_a ).sample
snake_case : str = torch.autograd.grad([y.sum()] ,[x] )[0]
snake_case : Optional[int] = self.scheduler._get_variance(_a )
snake_case : Any = torch.exp(0.5 * posterior_variance )
snake_case : Dict = model_std * grad
snake_case : str = 0
snake_case : str = x.detach()
snake_case : List[str] = x + scale * grad
snake_case : Dict = self.reset_xa(_a ,_a ,self.action_dim )
snake_case : List[str] = self.unet(x.permute(0 ,2 ,1 ) ,_a ).sample.permute(0 ,2 ,1 )
# TODO: verify deprecation of this kwarg
snake_case : Any = self.scheduler.step(_a ,_a ,_a ,predict_epsilon=_a )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
snake_case : Union[str, Any] = self.reset_xa(_a ,_a ,self.action_dim )
snake_case : int = self.to_torch(_a )
return x, y
def __call__( self ,__a ,__a=64 ,__a=32 ,__a=2 ,__a=0.1 ) -> Union[str, Any]:
# normalize the observations and create batch dimension
snake_case : Union[str, Any] = self.normalize(_a ,"""observations""" )
snake_case : Optional[Any] = obs[None].repeat(_a ,axis=0 )
snake_case : Tuple = {0: self.to_torch(_a )}
snake_case : Tuple = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
snake_case : Any = randn_tensor(_a ,device=self.unet.device )
snake_case : Dict = self.reset_xa(_a ,_a ,self.action_dim )
snake_case : Tuple = self.to_torch(_a )
# run the diffusion process
snake_case , snake_case : int = self.run_diffusion(_a ,_a ,_a ,_a )
# sort output trajectories by value
snake_case : int = y.argsort(0 ,descending=_a ).squeeze()
snake_case : Any = x[sorted_idx]
snake_case : Union[str, Any] = sorted_values[:, :, : self.action_dim]
snake_case : List[Any] = actions.detach().cpu().numpy()
snake_case : Any = self.de_normalize(_a ,key="""actions""" )
# select the action with the highest value
if y is not None:
snake_case : Dict = 0
else:
# if we didn't run value guiding, select a random action
snake_case : Optional[int] = np.random.randint(0 ,_a )
snake_case : Optional[Any] = denorm_actions[selected_index, 0]
return denorm_actions
| 116 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = DownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
def __UpperCAmelCase ( self ):
__a = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetDownsampleBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = CrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SkipDownBlockaD # noqa F405
__UpperCAmelCase : Tuple = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = AttnSkipDownBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = DownEncoderBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaD # noqa F405
__UpperCAmelCase : Any = 'mid'
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaDCrossAttn # noqa F405
__UpperCAmelCase : str = 'mid'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCAmelCase : List[Any] = 'mid'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetUpsampleBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = CrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = AttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = SkipUpBlockaD # noqa F405
__UpperCAmelCase : str = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpDecoderBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_a )
| 695 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowercase : Any , _lowercase : List[Any] , _lowercase : str ):
'''simple docstring'''
UpperCAmelCase : List[str] = RemBertConfig.from_json_file(lowerCAmelCase__ )
print("Building PyTorch model from configuration: {}".format(str(lowerCAmelCase__ ) ) )
UpperCAmelCase : Union[str, Any] = RemBertModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print("Save PyTorch model to {}".format(lowerCAmelCase__ ) )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
snake_case_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case_ : Optional[int] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 595 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase_ = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'maskformer'
__UpperCAmelCase : Optional[int] = {'hidden_size': 'mask_feature_size'}
__UpperCAmelCase : Any = ['resnet', 'swin']
__UpperCAmelCase : Dict = ['detr']
def __init__( self , _a = 256 , _a = 256 , _a = 0.1 , _a = False , _a = None , _a = None , _a = 0.02 , _a = 1.0 , _a = 1.0 , _a = 1.0 , _a = 20.0 , _a = None , **_a , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__a = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_a , _a ):
__a = backbone_config.pop('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_a )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__a = DetrConfig()
else:
# verify that the decoder is supported
__a = (
decoder_config.pop('''model_type''' ) if isinstance(_a , _a ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(_a , _a ):
__a = CONFIG_MAPPING[decoder_type]
__a = config_class.from_dict(_a )
__a = backbone_config
__a = decoder_config
# main feature dimension for the model
__a = fpn_feature_size
__a = mask_feature_size
# initializer
__a = init_std
__a = init_xavier_std
# Hungarian matcher && loss
__a = cross_entropy_weight
__a = dice_weight
__a = mask_weight
__a = use_auxiliary_loss
__a = no_object_weight
__a = output_auxiliary_logits
__a = self.decoder_config.encoder_attention_heads
__a = self.decoder_config.num_hidden_layers
super().__init__(**_a )
@classmethod
def __UpperCAmelCase ( cls , _a , _a , **_a ):
return cls(
backbone_config=_a , decoder_config=_a , **_a , )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.decoder_config.to_dict()
__a = self.__class__.model_type
return output
| 695 | 0 |
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_snake_case = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
_snake_case = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
_snake_case = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_snake_case = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
_snake_case = "allenai"
def lowerCAmelCase_ ( snake_case_ ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
_A : Optional[Any] = dict((re.sub(r"""@@$""","""""",lowerCAmelCase__ ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""","""</w>""",lowerCAmelCase__ ), v) for k, v in d.items() )
_A : List[str] = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
_A : List[str] = d[k] # restore
return da
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# prep
assert os.path.exists(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__,exist_ok=lowerCAmelCase__ )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
_A : Union[str, Any] = basename(lowerCAmelCase__ )
_A : Optional[int] = dirname(lowerCAmelCase__ )
_A : Dict = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_A : Union[str, Any] = cls.hub_models()
_A : Tuple = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
_A : Dict = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
_A : List[str] = hub_utils.from_pretrained(
lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,archive_map=lowerCAmelCase__,**lowerCAmelCase__ )
_A : str = vars(chkpt["""args"""]["""model"""] )
_A : List[str] = args["""source_lang"""]
_A : Any = args["""target_lang"""]
_A : Optional[Any] = dirname(lowerCAmelCase__ )
_A : Optional[int] = basename(lowerCAmelCase__ )
# dicts
_A : Any = os.path.join(lowerCAmelCase__,f'''dict.{src_lang}.txt''' )
_A : Tuple = os.path.join(lowerCAmelCase__,f'''dict.{tgt_lang}.txt''' )
_A : Dict = Dictionary.load(lowerCAmelCase__ )
_A : Union[str, Any] = rewrite_dict_keys(src_dict.indices )
_A : str = len(lowerCAmelCase__ )
_A : Optional[int] = os.path.join(lowerCAmelCase__,"""vocab-src.json""" )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(lowerCAmelCase__,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCAmelCase__,ensure_ascii=lowerCAmelCase__,indent=lowerCAmelCase__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_A : List[Any] = True
for k in src_vocab.keys():
if not k.islower():
_A : List[str] = False
break
_A : Any = Dictionary.load(lowerCAmelCase__ )
_A : List[str] = rewrite_dict_keys(tgt_dict.indices )
_A : Any = len(lowerCAmelCase__ )
_A : Optional[int] = os.path.join(lowerCAmelCase__,"""vocab-tgt.json""" )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(lowerCAmelCase__,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCAmelCase__,ensure_ascii=lowerCAmelCase__,indent=lowerCAmelCase__ ) )
# merges_file (bpecodes)
_A : Dict = os.path.join(lowerCAmelCase__,VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_A : List[Any] = os.path.join(lowerCAmelCase__,lowerCAmelCase__ )
if os.path.exists(lowerCAmelCase__ ):
break
with open(lowerCAmelCase__,encoding="""utf-8""" ) as fin:
_A : int = fin.read()
_A : Optional[Any] = re.sub(r""" \d+$""","""""",lowerCAmelCase__,0,re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(lowerCAmelCase__,"""w""",encoding="""utf-8""" ) as fout:
fout.write(lowerCAmelCase__ )
# model config
_A : Dict = os.path.join(lowerCAmelCase__,"""config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
_A : Any = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
_A : Union[str, Any] = 5
_A : List[Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_A : Dict = best_score_hparams[model_dir]["""length_penalty"""]
else:
_A : Optional[int] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(lowerCAmelCase__,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCAmelCase__,ensure_ascii=lowerCAmelCase__,indent=lowerCAmelCase__ ) )
# tokenizer config
_A : Any = os.path.join(lowerCAmelCase__,lowerCAmelCase__ )
_A : Tuple = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(lowerCAmelCase__,"""w""",encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCAmelCase__,ensure_ascii=lowerCAmelCase__,indent=lowerCAmelCase__ ) )
# model
_A : Any = chkpt["""models"""][0]
_A : Dict = model.state_dict()
# rename keys to start with 'model.'
_A : int = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_A : List[Any] = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase__,lowerCAmelCase__ )
_A : Union[str, Any] = FSMTConfig.from_pretrained(lowerCAmelCase__ )
_A : List[Any] = FSMTForConditionalGeneration(lowerCAmelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase__,strict=lowerCAmelCase__ )
# save
_A : Tuple = os.path.join(lowerCAmelCase__,lowerCAmelCase__ )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(lowerCAmelCase__,lowerCAmelCase__ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 307 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowercase_ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowercase ( lowerCAmelCase__ : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
__a = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__a = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__a = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 695 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 373 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'gpt_bigcode'
__UpperCAmelCase : Tuple = ['past_key_values']
__UpperCAmelCase : Dict = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _a=50_257 , _a=1_024 , _a=768 , _a=12 , _a=12 , _a=None , _a="gelu_pytorch_tanh" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.02 , _a=True , _a=True , _a=50_256 , _a=50_256 , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = n_positions
__a = n_embd
__a = n_layer
__a = n_head
__a = n_inner
__a = activation_function
__a = resid_pdrop
__a = embd_pdrop
__a = attn_pdrop
__a = layer_norm_epsilon
__a = initializer_range
__a = scale_attn_weights
__a = use_cache
__a = attention_softmax_in_fpaa
__a = scale_attention_softmax_in_fpaa
__a = multi_query
__a = bos_token_id
__a = eos_token_id
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
| 695 | 0 |
'''simple docstring'''
import baseaa
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
return baseaa.baaencode(string.encode("""utf-8""" ) )
def UpperCAmelCase ( lowerCamelCase_ :bytes ):
'''simple docstring'''
return baseaa.baadecode(lowerCAmelCase__ ).decode("""utf-8""" )
if __name__ == "__main__":
__A : Union[str, Any] = 'Hello World!'
__A : Union[str, Any] = baseaa_encode(test)
print(encoded)
__A : List[Any] = baseaa_decode(encoded)
print(decoded) | 334 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 1_6
lowercase_ = 3_2
def lowercase ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int = 16 , lowerCAmelCase__ : str = "bert-base-cased" ) -> Optional[int]:
__a = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
__a = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCAmelCase__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__a = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__a = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowerCAmelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__a = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
__a = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
# Initialize accelerator
__a = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a = config['''lr''']
__a = int(config['''num_epochs'''] )
__a = int(config['''seed'''] )
__a = int(config['''batch_size'''] )
__a = args.model_name_or_path
set_seed(lowerCAmelCase__ )
__a , __a = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
# Instantiate optimizer
__a = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__a = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase__ )
if accelerator.state.deepspeed_plugin is not None:
__a = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__a = 1
__a = (len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__a = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase__ , )
else:
__a = DummyScheduler(lowerCAmelCase__ , total_num_steps=lowerCAmelCase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# We need to keep track of how many total steps we have iterated over
__a = 0
# We also need to keep track of the stating epoch so files are named properly
__a = 0
# Now we train the model
__a = evaluate.load('''glue''' , '''mrpc''' )
__a = 0
__a = {}
for epoch in range(lowerCAmelCase__ , lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
__a = model(**lowerCAmelCase__ )
__a = outputs.loss
__a = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__a = 0
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__a = model(**lowerCAmelCase__ )
__a = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__a , __a = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCAmelCase__ ) - 1:
__a = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__a = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
__a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase__ )
__a = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
__a = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( ) -> List[str]:
__a = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=lowerCAmelCase__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase__ , )
parser.add_argument(
'''--output_dir''' , type=lowerCAmelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--performance_lower_bound''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=lowerCAmelCase__ , default=3 , help='''Number of train epochs.''' , )
__a = parser.parse_args()
__a = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 695 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase : int ="docs/source/en/_toctree.yml"
def UpperCamelCase ( _lowerCamelCase : List[Any] ):
A__ = defaultdict(lowerCAmelCase__ )
A__ = []
A__ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(lowerCAmelCase__ )
A__ = new_doc_list
A__ = [key for key, value in counts.items() if value > 1]
A__ = []
for duplicate_key in duplicates:
A__ = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(lowerCAmelCase__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
A__ = sorted(lowerCAmelCase__ , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCAmelCase__ ) > 1:
raise ValueError("{doc_list} has two \'overview\' docs which is not allowed." )
overview_doc.extend(lowerCAmelCase__ )
# Sort
return overview_doc
def UpperCamelCase ( _lowerCamelCase : Dict=False ):
with open(lowerCAmelCase__ , encoding="utf-8" ) as f:
A__ = yaml.safe_load(f.read() )
# Get to the API doc
A__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ = content[api_idx]["sections"]
# Then to the model doc
A__ = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
A__ = api_doc[scheduler_idx]["sections"]
A__ = clean_doc_toc(lowerCAmelCase__ )
A__ = False
if new_scheduler_doc != scheduler_doc:
A__ = True
if overwrite:
A__ = new_scheduler_doc
if diff:
if overwrite:
A__ = api_doc
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def UpperCamelCase ( _lowerCamelCase : Union[str, Any]=False ):
with open(lowerCAmelCase__ , encoding="utf-8" ) as f:
A__ = yaml.safe_load(f.read() )
# Get to the API doc
A__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ = content[api_idx]["sections"]
# Then to the model doc
A__ = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
A__ = False
A__ = api_doc[pipeline_idx]["sections"]
A__ = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
A__ = pipeline_doc["section"]
A__ = clean_doc_toc(lowerCAmelCase__ )
if overwrite:
A__ = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCAmelCase__ )
# sort overall pipeline doc
A__ = clean_doc_toc(lowerCAmelCase__ )
if new_pipeline_docs != pipeline_docs:
A__ = True
if overwrite:
A__ = new_pipeline_docs
if diff:
if overwrite:
A__ = api_doc
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(lowerCAmelCase__ , allow_unicode=lowerCAmelCase__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
__lowerCAmelCase : Any =argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__lowerCAmelCase : List[str] =parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 440 |
"""simple docstring"""
from typing import Any
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : list , lowerCAmelCase__ : dict , lowerCAmelCase__ : dict , lowerCAmelCase__ : dict , ) -> list:
_validation(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
# Creates data structures and fill initial step
__a = {}
__a = {}
for state in states_space:
__a = observations_space[0]
__a = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__a = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowerCAmelCase__ ) ):
__a = observations_space[o]
__a = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__a = ''''''
__a = -1
for k_state in states_space:
__a = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__a = probability
__a = k_state
# Update probabilities and pointers dicts
__a = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__a = arg_max
# The final observation
__a = observations_space[len(lowerCAmelCase__ ) - 1]
# argmax for given final observation
__a = ''''''
__a = -1
for k_state in states_space:
__a = probabilities[(k_state, final_observation)]
if probability > max_probability:
__a = probability
__a = k_state
__a = arg_max
# Process pointers backwards
__a = last_state
__a = []
for o in range(len(lowerCAmelCase__ ) - 1 , -1 , -1 ):
result.append(lowerCAmelCase__ )
__a = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
_validate_not_empty(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
_validate_lists(lowerCAmelCase__ , lowerCAmelCase__ )
_validate_dicts(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any ) -> None:
_validate_list(lowerCAmelCase__ , '''observations_space''' )
_validate_list(lowerCAmelCase__ , '''states_space''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str ) -> None:
if not isinstance(_object , lowerCAmelCase__ ):
__a = f'''{var_name} must be a list'''
raise ValueError(lowerCAmelCase__ )
else:
for x in _object:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = f'''{var_name} must be a list of strings'''
raise ValueError(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , ) -> None:
_validate_dict(lowerCAmelCase__ , '''initial_probabilities''' , lowerCAmelCase__ )
_validate_nested_dict(lowerCAmelCase__ , '''transition_probabilities''' )
_validate_nested_dict(lowerCAmelCase__ , '''emission_probabilities''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str ) -> None:
_validate_dict(_object , lowerCAmelCase__ , lowerCAmelCase__ )
for x in _object.values():
_validate_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : type , lowerCAmelCase__ : bool = False ) -> None:
if not isinstance(_object , lowerCAmelCase__ ):
__a = f'''{var_name} must be a dict'''
raise ValueError(lowerCAmelCase__ )
if not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for x in _object ):
__a = f'''{var_name} all keys must be strings'''
raise ValueError(lowerCAmelCase__ )
if not all(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for x in _object.values() ):
__a = '''nested dictionary ''' if nested else ''''''
__a = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(lowerCAmelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = (EulerDiscreteScheduler,)
__lowerCAmelCase = 1_0
def __UpperCAmelCase ( self : Any, **UpperCamelCase__ : Union[str, Any] ) -> int:
_A = {
'num_train_timesteps': 11_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self : int ) -> List[Any]:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001], [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a, beta_end=_a )
def __UpperCAmelCase ( self : List[str] ) -> str:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_a, _a )
_A = model(_a, _a )
_A = scheduler.step(_a, _a, _a, generator=_a )
_A = output.prev_sample
_A = torch.sum(torch.abs(_a ) )
_A = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(prediction_type='v_prediction' )
_A = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_a, _a )
_A = model(_a, _a )
_A = scheduler.step(_a, _a, _a, generator=_a )
_A = output.prev_sample
_A = torch.sum(torch.abs(_a ) )
_A = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps, device=_a )
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_A = sample.to(_a )
for t in scheduler.timesteps:
_A = scheduler.scale_model_input(_a, _a )
_A = model(_a, _a )
_A = scheduler.step(_a, _a, _a, generator=_a )
_A = output.prev_sample
_A = torch.sum(torch.abs(_a ) )
_A = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __UpperCAmelCase ( self : int ) -> Any:
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_a, use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps, device=_a )
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_A = sample.to(_a )
for t in scheduler.timesteps:
_A = scheduler.scale_model_input(_a, _a )
_A = model(_a, _a )
_A = scheduler.step(_a, _a, _a, generator=_a )
_A = output.prev_sample
_A = torch.sum(torch.abs(_a ) )
_A = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
| 107 |
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( lowerCAmelCase__ : float = 0.1 ) -> int:
__a = 3
__a = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(lowerCAmelCase__ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
a_ : List[Any] = 'Create a default config file for Accelerate with only a few flags set.'
def __a ( __UpperCAmelCase="no" , __UpperCAmelCase = default_json_config_file , __UpperCAmelCase = False ):
a__ = Path(lowerCAmelCase__ )
path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
if path.exists():
print(
f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
a__ = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}" )
a__ = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
a__ = torch.cuda.device_count()
a__ = num_gpus
a__ = False
if num_gpus > 1:
a__ = '''MULTI_GPU'''
else:
a__ = '''NO'''
elif is_xpu_available() and use_xpu:
a__ = torch.xpu.device_count()
a__ = num_xpus
a__ = False
if num_xpus > 1:
a__ = '''MULTI_XPU'''
else:
a__ = '''NO'''
elif is_npu_available():
a__ = torch.npu.device_count()
a__ = num_npus
a__ = False
if num_npus > 1:
a__ = '''MULTI_NPU'''
else:
a__ = '''NO'''
else:
a__ = 0
a__ = True
a__ = 1
a__ = '''NO'''
a__ = ClusterConfig(**lowerCAmelCase__ )
config.to_json_file(lowerCAmelCase__ )
return path
def __a ( __UpperCAmelCase , __UpperCAmelCase ):
a__ = parser.add_parser('''default''' , parents=lowerCAmelCase__ , help=lowerCAmelCase__ , formatter_class=lowerCAmelCase__ )
parser.add_argument(
'''--config_file''' , default=lowerCAmelCase__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=lowerCAmelCase__ , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=lowerCAmelCase__ )
return parser
def __a ( __UpperCAmelCase ):
a__ = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"accelerate configuration saved at {config_file}" )
| 194 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : Any = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = 'roberta'
def __init__( self : Union[str, Any] , lowercase__ : Union[str, Any]=50_265 , lowercase__ : int=768 , lowercase__ : List[Any]=12 , lowercase__ : List[Any]=12 , lowercase__ : Dict=3_072 , lowercase__ : List[Any]="gelu" , lowercase__ : List[Any]=0.1 , lowercase__ : List[Any]=0.1 , lowercase__ : Union[str, Any]=512 , lowercase__ : Optional[int]=2 , lowercase__ : Optional[Any]=0.0_2 , lowercase__ : Optional[int]=1e-12 , lowercase__ : str=1 , lowercase__ : Union[str, Any]=0 , lowercase__ : Dict=2 , lowercase__ : Union[str, Any]="absolute" , lowercase__ : Optional[Any]=True , lowercase__ : Tuple=None , **lowercase__ : List[str] , ) ->List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_UpperCamelCase : Dict = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : int = num_hidden_layers
_UpperCamelCase : Union[str, Any] = num_attention_heads
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Tuple = intermediate_size
_UpperCamelCase : Dict = hidden_dropout_prob
_UpperCamelCase : List[Any] = attention_probs_dropout_prob
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : int = layer_norm_eps
_UpperCamelCase : Optional[int] = position_embedding_type
_UpperCamelCase : Optional[int] = use_cache
_UpperCamelCase : Tuple = classifier_dropout
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def snake_case__ ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 435 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = 'dpr'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=0 , _a="absolute" , _a = 0 , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = projection_dim
__a = position_embedding_type
| 695 | 0 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
snake_case_ : str = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def lowercase__( _UpperCamelCase : Optional[int] )-> List[str]:
"""simple docstring"""
_UpperCamelCase = test_results.split(" " )
_UpperCamelCase = 0
_UpperCamelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCamelCase = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCAmelCase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def lowercase__( _UpperCamelCase : int )-> str:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = None
_UpperCamelCase = False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , lowerCAmelCase__ ):
_UpperCamelCase = True
_UpperCamelCase = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
_UpperCamelCase = line
_UpperCamelCase = False
return failures
class A_ :
'''simple docstring'''
def __init__( self , A_ , A_ ):
_UpperCamelCase = title
_UpperCamelCase = doc_test_results["time_spent"].split("," )[0]
_UpperCamelCase = doc_test_results["success"]
_UpperCamelCase = doc_test_results["failures"]
_UpperCamelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCamelCase = doc_test_results
@property
def a ( self ):
_UpperCamelCase = [self._time_spent]
_UpperCamelCase = 0
for time in time_spent:
_UpperCamelCase = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_a ) == 1:
_UpperCamelCase = [0, 0, time_parts[0]]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F"{int(_a )}h{int(_a )}m{int(_a )}s"
@property
def a ( self ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def a ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def a ( self ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
F" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def a ( self ):
_UpperCamelCase = 40
_UpperCamelCase = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(_a , _a )}
_UpperCamelCase = ""
for category, failures in category_failures.items():
if len(_a ) == 0:
continue
if report != "":
report += "\n\n"
report += F"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def a ( self ):
_UpperCamelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_a )
@staticmethod
def a ( ):
_UpperCamelCase = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_a , )
def a ( self ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
_UpperCamelCase = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
_UpperCamelCase = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=_a , )
def a ( self , A_ , A_ , A_ , A_ ):
_UpperCamelCase = ""
for key, value in failures.items():
_UpperCamelCase = value[:2_00] + " [Truncated]" if len(_a ) > 2_50 else value
failures_text += F"*{key}*\n_{value}_\n\n"
_UpperCamelCase = job_name
_UpperCamelCase = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_UpperCamelCase = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def a ( self ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
_UpperCamelCase = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
_UpperCamelCase = sorted(self.doc_test_results.items() , key=lambda A_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
_UpperCamelCase = F"*Num failures* :{len(job_result['failed'] )} \n"
_UpperCamelCase = job_result["failures"]
_UpperCamelCase = self.get_reply_blocks(_a , _a , _a , text=_a )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"Results for {job}" , blocks=_a , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def lowercase__( )-> Any:
"""simple docstring"""
_UpperCamelCase = os.environ["GITHUB_RUN_ID"]
_UpperCamelCase = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
_UpperCamelCase = requests.get(lowerCAmelCase__ ).json()
_UpperCamelCase = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_UpperCamelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(lowerCAmelCase__ ):
_UpperCamelCase = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowerCAmelCase__ )
return {}
def lowercase__( _UpperCamelCase : str )-> Dict:
"""simple docstring"""
_UpperCamelCase = {}
if os.path.exists(lowerCAmelCase__ ):
_UpperCamelCase = os.listdir(lowerCAmelCase__ )
for file in files:
try:
with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , encoding="utf-8" ) as f:
_UpperCamelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )}." ) from e
return _artifact
def lowercase__( )-> Dict:
"""simple docstring"""
class A_ :
'''simple docstring'''
def __init__( self , A_ ):
_UpperCamelCase = name
_UpperCamelCase = []
def __str__( self ):
return self.name
def a ( self , A_ ):
self.paths.append({"name": self.name, "path": path} )
_UpperCamelCase = {}
_UpperCamelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCamelCase = directory
if artifact_name not in _available_artifacts:
_UpperCamelCase = Artifact(lowerCAmelCase__ )
_available_artifacts[artifact_name].add_path(lowerCAmelCase__ )
return _available_artifacts
if __name__ == "__main__":
snake_case_ : Union[str, Any] = get_job_links()
snake_case_ : List[Any] = retrieve_available_artifacts()
snake_case_ : List[str] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
snake_case_ : Any = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
snake_case_ : List[str] = github_actions_job_links.get('''run_doctests''')
snake_case_ : Any = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
snake_case_ : List[str] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
snake_case_ , snake_case_ , snake_case_ : Optional[int] = handle_test_results(artifact['''stats'''])
snake_case_ : int = failed
snake_case_ : str = success
snake_case_ : List[Any] = time_spent[1:-1] + ''', '''
snake_case_ : Union[str, Any] = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
snake_case_ : Tuple = line.replace('''FAILED ''', '''''')
snake_case_ : Optional[int] = line.split()[0].replace('''\n''', '''''')
if "::" in line:
snake_case_ , snake_case_ : int = line.split('''::''')
else:
snake_case_ , snake_case_ : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
snake_case_ : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
snake_case_ : Optional[int] = all_failures[test] if test in all_failures else '''N/A'''
snake_case_ : Optional[int] = failure
break
snake_case_ : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 138 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = StableDiffusionInpaintPipeline
__UpperCAmelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__UpperCAmelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__UpperCAmelCase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : Tuple = frozenset([] )
def __UpperCAmelCase ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , )
__a = PNDMScheduler(skip_prk_steps=_a )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
__a = CLIPTextModel(_a )
__a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __UpperCAmelCase ( self , _a , _a=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((64, 64) )
__a = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_a ).startswith('''mps''' ):
__a = torch.manual_seed(_a )
else:
__a = torch.Generator(device=_a ).manual_seed(_a )
__a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCAmelCase ( self ):
__a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableDiffusionInpaintPipeline(**_a )
__a = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
__a = self.get_dummy_inputs(_a )
__a = sd_pipe(**_a ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCAmelCase ( self ):
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = StableDiffusionInpaintPipeline.from_pretrained(
_a , torch_dtype=torch.floataa , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCAmelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a = '''stabilityai/stable-diffusion-2-inpainting'''
__a = PNDMScheduler.from_pretrained(_a , subfolder='''scheduler''' )
__a = StableDiffusionInpaintPipeline.from_pretrained(
_a , safety_checker=_a , scheduler=_a , torch_dtype=torch.floataa , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a = torch.manual_seed(0 )
__a = pipe(
prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type='''np''' , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 695 | 0 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase : Tuple = 'autoformer'
lowercase : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , _A = None , _A = None , _A = "student_t" , _A = "nll" , _A = 1 , _A = [1, 2, 3, 4, 5, 6, 7] , _A = True , _A = 0 , _A = 0 , _A = 0 , _A = 0 , _A = None , _A = None , _A = 6_4 , _A = 2 , _A = 2 , _A = 2 , _A = 2 , _A = 3_2 , _A = 3_2 , _A = "gelu" , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 0.1 , _A = 1_0_0 , _A = 0.02 , _A = True , _A=True , _A = 1_0 , _A = 2_5 , _A = 3 , **_A , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =prediction_length
_SCREAMING_SNAKE_CASE =context_length if context_length is not None else prediction_length
_SCREAMING_SNAKE_CASE =distribution_output
_SCREAMING_SNAKE_CASE =loss
_SCREAMING_SNAKE_CASE =input_size
_SCREAMING_SNAKE_CASE =num_time_features
_SCREAMING_SNAKE_CASE =lags_sequence
_SCREAMING_SNAKE_CASE =scaling
_SCREAMING_SNAKE_CASE =num_dynamic_real_features
_SCREAMING_SNAKE_CASE =num_static_real_features
_SCREAMING_SNAKE_CASE =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
_SCREAMING_SNAKE_CASE =cardinality
else:
_SCREAMING_SNAKE_CASE =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
_SCREAMING_SNAKE_CASE =embedding_dimension
else:
_SCREAMING_SNAKE_CASE =[min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_SCREAMING_SNAKE_CASE =num_parallel_samples
# Transformer architecture configuration
_SCREAMING_SNAKE_CASE =input_size * len(self.lags_sequence ) + self._number_of_features
_SCREAMING_SNAKE_CASE =d_model
_SCREAMING_SNAKE_CASE =encoder_attention_heads
_SCREAMING_SNAKE_CASE =decoder_attention_heads
_SCREAMING_SNAKE_CASE =encoder_ffn_dim
_SCREAMING_SNAKE_CASE =decoder_ffn_dim
_SCREAMING_SNAKE_CASE =encoder_layers
_SCREAMING_SNAKE_CASE =decoder_layers
_SCREAMING_SNAKE_CASE =dropout
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =activation_dropout
_SCREAMING_SNAKE_CASE =encoder_layerdrop
_SCREAMING_SNAKE_CASE =decoder_layerdrop
_SCREAMING_SNAKE_CASE =activation_function
_SCREAMING_SNAKE_CASE =init_std
_SCREAMING_SNAKE_CASE =use_cache
# Autoformer
_SCREAMING_SNAKE_CASE =label_length
_SCREAMING_SNAKE_CASE =moving_average
_SCREAMING_SNAKE_CASE =autocorrelation_factor
super().__init__(is_encoder_decoder=_a , **_a )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 255 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : bool = False
__UpperCAmelCase : float = 3.0
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCAmelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
__a = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
AcceleratorState._reset_state()
__a = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCAmelCase ( self ):
__a = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
lowercase_ = Accelerator(kwargs_handlers=[ddp_scaler])
lowercase_ = torch.nn.Linear(1_0_0, 2_0_0)
lowercase_ = accelerator.prepare(model)
# Check the values changed in kwargs
lowercase_ = ""
lowercase_ = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 695 | 0 |
'''simple docstring'''
from math import factorial
def lowerCamelCase__ ( __lowercase , __lowercase ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(lowerCAmelCase__ ) // (factorial(lowerCAmelCase__ ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 116 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = inspect.getfile(accelerate.test_utils )
__a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
__a = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def __UpperCAmelCase ( self ):
__a = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
__a = [sys.executable] + distributed_args
execute_subprocess_async(_a , env=os.environ.copy() )
| 695 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
snake_case_ : Any = 2
class snake_case__ :
def __init__( self : Union[str, Any] , *, # begin keyword-only arguments
lowercase : Any="<s>" , lowercase : List[str]="<pad>" , lowercase : int="</s>" , lowercase : List[str]="<unk>" , lowercase : Any=None , ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = bos, unk, pad, eos
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Tuple = []
UpperCAmelCase : str = {}
UpperCAmelCase : Optional[Any] = self.add_symbol(_a )
UpperCAmelCase : List[Any] = self.add_symbol(_a )
UpperCAmelCase : List[Any] = self.add_symbol(_a )
UpperCAmelCase : Dict = self.add_symbol(_a )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_a )
UpperCAmelCase : Optional[Any] = len(self.symbols )
def __eq__( self : int , lowercase : List[str] ):
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self : Any , lowercase : Optional[Any] ):
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : int ):
'''simple docstring'''
return len(self.symbols )
def __contains__( self : Union[str, Any] , lowercase : Any ):
'''simple docstring'''
return sym in self.indices
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase : int = cls()
d.add_from_file(_a )
return d
def __lowerCAmelCase ( self : Optional[int] , lowercase : Tuple , lowercase : str=1 , lowercase : Optional[Any]=False ):
'''simple docstring'''
if word in self.indices and not overwrite:
UpperCAmelCase : Optional[int] = self.indices[word]
UpperCAmelCase : List[str] = self.count[idx] + n
return idx
else:
UpperCAmelCase : Optional[int] = len(self.symbols )
UpperCAmelCase : Optional[int] = idx
self.symbols.append(_a )
self.count.append(_a )
return idx
def __lowerCAmelCase ( self : Optional[Any] , lowercase : List[str] ):
'''simple docstring'''
return 0
def __lowerCAmelCase ( self : int , lowercase : int ):
'''simple docstring'''
if isinstance(_a , _a ):
try:
with open(_a , "r" , encoding="utf-8" ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(_a ) )
return
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : List[str] = self._load_meta(_a )
for line in lines[indices_start_line:]:
try:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = line.rstrip().rsplit(" " , 1 )
if field == "#fairseq:overwrite":
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase , UpperCAmelCase : int = line.rsplit(" " , 1 )
else:
UpperCAmelCase : List[str] = False
UpperCAmelCase : Tuple = int(_a )
UpperCAmelCase : Optional[Any] = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: \'{}\'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(_a ) )
self.add_symbol(_a , n=_a , overwrite=_a )
except ValueError:
raise ValueError("Incorrect dictionary format, expected \'<token> <cnt> [flags]\'" )
def lowercase_ ( _lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : int = dict((re.sub(R"@@$" , "" , lowerCAmelCase__ ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , lowerCAmelCase__ ), v) for k, v in d.items() )
UpperCAmelCase : Optional[int] = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
UpperCAmelCase : Optional[Any] = d[k] # restore
return da
def lowercase_ ( _lowercase : str , _lowercase : str ):
'''simple docstring'''
if not os.path.exists(lowerCAmelCase__ ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
UpperCAmelCase : Optional[int] = os.path.join(lowerCAmelCase__ , "checkpoint.pt" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
UpperCAmelCase : List[Any] = torch.load(lowerCAmelCase__ , map_location="cpu" )
UpperCAmelCase : str = chkpt["cfg"]["model"]
# dicts
UpperCAmelCase : List[str] = os.path.join(lowerCAmelCase__ , "dict.txt" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
UpperCAmelCase : Optional[Any] = Dictionary.load(lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase : Dict = len(lowerCAmelCase__ )
UpperCAmelCase : Tuple = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES["vocab_file"] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# merges_file (bpecodes)
UpperCAmelCase : Tuple = os.path.join(lowerCAmelCase__ , "bpecodes" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
UpperCAmelCase : int = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
# model config
UpperCAmelCase : Any = os.path.join(lowerCAmelCase__ , "config.json" )
UpperCAmelCase : Union[str, Any] = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.0_2,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# tokenizer config
UpperCAmelCase : Any = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase : List[Any] = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 10_24,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# model
UpperCAmelCase : int = chkpt["model"]
# remove unneeded keys
UpperCAmelCase : Any = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase : List[str] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
UpperCAmelCase : str = model_state_dict.pop(lowerCAmelCase__ )
else:
UpperCAmelCase : List[Any] = model_state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase : Any = BioGptConfig.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase : List[str] = BioGptForCausalLM(lowerCAmelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase__ )
# save
UpperCAmelCase : str = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print("Conversion is done!" )
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
snake_case_ : Union[str, Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 595 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = BertTokenizer
__UpperCAmelCase : Optional[Any] = BertTokenizerFast
__UpperCAmelCase : str = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Any = filter_non_english
def __UpperCAmelCase ( self ):
super().setUp()
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self , _a ):
__a = '''UNwant\u00E9d,running'''
__a = '''unwanted, running'''
return input_text, output_text
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file )
__a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
__a = self.get_tokenizer(do_lower_case=_a )
__a = self.get_rust_tokenizer(do_lower_case=_a )
__a = '''UNwant\u00E9d,running'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCAmelCase ( self ):
__a = BasicTokenizer()
__a = '''a\n\'ll !!to?\'d of, can\'t.'''
__a = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__a = {}
for i, token in enumerate(_a ):
__a = i
__a = WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__a = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
__a = tokenizer_r.do_lower_case if hasattr(_a , '''do_lower_case''' ) else False
__a = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __UpperCAmelCase ( self ):
__a = ['''的''', '''人''', '''有''']
__a = ''''''.join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = True
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
__a = False
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
__a = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
| 695 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( __SCREAMING_SNAKE_CASE ):
_a = 'megatron-bert'
def __init__( self , _a=2_9056 , _a=1024 , _a=24 , _a=16 , _a=4096 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=0 , _a="absolute" , _a=True , **_a , ) -> Tuple:
super().__init__(pad_token_id=_a , **_a )
_A : str = vocab_size
_A : Optional[Any] = hidden_size
_A : int = num_hidden_layers
_A : Union[str, Any] = num_attention_heads
_A : Tuple = hidden_act
_A : Dict = intermediate_size
_A : str = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : str = max_position_embeddings
_A : List[str] = type_vocab_size
_A : Optional[Any] = initializer_range
_A : Any = layer_norm_eps
_A : Optional[Any] = position_embedding_type
_A : Any = use_cache
| 307 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , ) -> float:
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
lowerCAmelCase__ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = XLMTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = False
def a_ ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
UpperCAmelCase = dict(zip(_a , range(len(_a ) ) ) )
UpperCAmelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(_a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(_a ) )
def a_ ( self , lowercase_ ) -> List[Any]:
UpperCAmelCase = 'lower newer'
UpperCAmelCase = 'lower newer'
return input_text, output_text
def a_ ( self ) -> Any:
UpperCAmelCase = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase = 'lower'
UpperCAmelCase = ['low', 'er</w>']
UpperCAmelCase = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
UpperCAmelCase = tokens + ['<unk>']
UpperCAmelCase = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@slow
def a_ ( self ) -> str:
UpperCAmelCase = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
UpperCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=_a )
UpperCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=_a )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_a )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 373 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> Any:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__a = len(set_a.intersection(lowerCAmelCase__ ) )
if alternative_union:
__a = len(lowerCAmelCase__ ) + len(lowerCAmelCase__ )
else:
__a = len(set_a.union(lowerCAmelCase__ ) )
return intersection / union
if isinstance(lowerCAmelCase__ , (list, tuple) ) and isinstance(lowerCAmelCase__ , (list, tuple) ):
__a = [element for element in set_a if element in set_b]
if alternative_union:
__a = len(lowerCAmelCase__ ) + len(lowerCAmelCase__ )
return len(lowerCAmelCase__ ) / union
else:
__a = set_a + [element for element in set_b if element not in set_a]
return len(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
return len(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
return None
if __name__ == "__main__":
lowercase_ = {"a", "b", "c", "d", "e"}
lowercase_ = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 695 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : List[Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
lowercase : List[str] = 'wavlm'
def __init__( self :Tuple ,_UpperCamelCase :str=3_2 ,_UpperCamelCase :str=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :Optional[Any]=1_2 ,_UpperCamelCase :Union[str, Any]=3_0_7_2 ,_UpperCamelCase :str="gelu" ,_UpperCamelCase :str=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :str=0.1 ,_UpperCamelCase :str=0.0 ,_UpperCamelCase :Union[str, Any]=0.1 ,_UpperCamelCase :Union[str, Any]=0.1 ,_UpperCamelCase :Optional[Any]=0.02 ,_UpperCamelCase :Dict=1E-5 ,_UpperCamelCase :Union[str, Any]="group" ,_UpperCamelCase :str="gelu" ,_UpperCamelCase :int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) ,_UpperCamelCase :Tuple=(5, 2, 2, 2, 2, 2, 2) ,_UpperCamelCase :Optional[int]=(1_0, 3, 3, 3, 3, 2, 2) ,_UpperCamelCase :str=False ,_UpperCamelCase :Optional[Any]=1_2_8 ,_UpperCamelCase :Any=1_6 ,_UpperCamelCase :List[str]=3_2_0 ,_UpperCamelCase :int=8_0_0 ,_UpperCamelCase :List[Any]=False ,_UpperCamelCase :int=True ,_UpperCamelCase :Optional[Any]=0.05 ,_UpperCamelCase :int=1_0 ,_UpperCamelCase :Optional[int]=2 ,_UpperCamelCase :Dict=0.0 ,_UpperCamelCase :Dict=1_0 ,_UpperCamelCase :List[str]=3_2_0 ,_UpperCamelCase :int=2 ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :Tuple=1_0_0 ,_UpperCamelCase :Any=2_5_6 ,_UpperCamelCase :Dict=2_5_6 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Optional[int]="mean" ,_UpperCamelCase :int=False ,_UpperCamelCase :Union[str, Any]=False ,_UpperCamelCase :Union[str, Any]=2_5_6 ,_UpperCamelCase :Optional[int]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) ,_UpperCamelCase :str=(5, 3, 3, 1, 1) ,_UpperCamelCase :Tuple=(1, 2, 3, 1, 1) ,_UpperCamelCase :List[str]=5_1_2 ,_UpperCamelCase :Union[str, Any]=8_0 ,_UpperCamelCase :Union[str, Any]=0 ,_UpperCamelCase :List[Any]=1 ,_UpperCamelCase :Dict=2 ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :int=3 ,_UpperCamelCase :Any=2 ,_UpperCamelCase :int=3 ,_UpperCamelCase :Union[str, Any]=None ,**_UpperCamelCase :Dict ,):
super().__init__(**_a ,pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a )
snake_case_ : str = hidden_size
snake_case_ : List[Any] = feat_extract_norm
snake_case_ : Any = feat_extract_activation
snake_case_ : Optional[Any] = list(_a )
snake_case_ : Union[str, Any] = list(_a )
snake_case_ : Optional[int] = list(_a )
snake_case_ : List[Any] = conv_bias
snake_case_ : Any = num_buckets
snake_case_ : Any = max_bucket_distance
snake_case_ : Union[str, Any] = num_conv_pos_embeddings
snake_case_ : Union[str, Any] = num_conv_pos_embedding_groups
snake_case_ : Union[str, Any] = len(self.conv_dim )
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Tuple = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : Optional[int] = num_attention_heads
snake_case_ : int = hidden_dropout
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Optional[int] = activation_dropout
snake_case_ : Union[str, Any] = feat_proj_dropout
snake_case_ : Any = final_dropout
snake_case_ : List[str] = layerdrop
snake_case_ : Optional[Any] = layer_norm_eps
snake_case_ : Any = initializer_range
snake_case_ : List[str] = num_ctc_classes
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : str = do_stable_layer_norm
snake_case_ : Dict = use_weighted_layer_sum
snake_case_ : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ : List[str] = apply_spec_augment
snake_case_ : int = mask_time_prob
snake_case_ : List[Any] = mask_time_length
snake_case_ : str = mask_time_min_masks
snake_case_ : str = mask_feature_prob
snake_case_ : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case_ : Any = num_codevectors_per_group
snake_case_ : Dict = num_codevector_groups
snake_case_ : Tuple = contrastive_logits_temperature
snake_case_ : Tuple = num_negatives
snake_case_ : Tuple = codevector_dim
snake_case_ : Optional[int] = proj_codevector_dim
snake_case_ : Tuple = diversity_loss_weight
# ctc loss
snake_case_ : int = ctc_loss_reduction
snake_case_ : List[Any] = ctc_zero_infinity
# adapter
snake_case_ : Any = add_adapter
snake_case_ : Tuple = adapter_kernel_size
snake_case_ : Tuple = adapter_stride
snake_case_ : Any = num_adapter_layers
snake_case_ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ : Any = list(_a )
snake_case_ : Optional[Any] = list(_a )
snake_case_ : Dict = list(_a )
snake_case_ : Dict = xvector_output_dim
@property
def a__ ( self :int ):
return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 334 |
"""simple docstring"""
from __future__ import annotations
import requests
def lowercase ( lowerCAmelCase__ : str ) -> dict:
__a = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(lowerCAmelCase__ ).json()
def lowercase ( lowerCAmelCase__ : int = 10 ) -> list[dict]:
__a = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
__a = requests.get(lowerCAmelCase__ ).json()[:max_stories]
return [get_hackernews_story(lowerCAmelCase__ ) for story_id in story_ids]
def lowercase ( lowerCAmelCase__ : int = 10 ) -> str:
__a = hackernews_top_stories(lowerCAmelCase__ )
return "\n".join('''* [{title}]({url})'''.format(**lowerCAmelCase__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase ( _lowerCamelCase : np.ndarray , _lowerCamelCase : np.ndarray ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
def UpperCamelCase ( _lowerCamelCase : np.ndarray , _lowerCamelCase : np.ndarray ):
if dataset.ndim != value_array.ndim:
A__ = (
"Wrong input data\'s dimensions... "
F"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(lowerCAmelCase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
A__ = (
"Wrong input data\'s shape... "
F"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(lowerCAmelCase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
A__ = (
"Input data have different datatype... "
F"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(lowerCAmelCase__ )
A__ = []
for value in value_array:
A__ = euclidean(lowerCAmelCase__ , dataset[0] )
A__ = dataset[0].tolist()
for dataset_value in dataset[1:]:
A__ = euclidean(lowerCAmelCase__ , lowerCAmelCase__ )
if dist > temp_dist:
A__ = temp_dist
A__ = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase ( _lowerCamelCase : np.ndarray , _lowerCamelCase : np.ndarray ):
return np.dot(lowerCAmelCase__ , lowerCAmelCase__ ) / (norm(lowerCAmelCase__ ) * norm(lowerCAmelCase__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 440 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'blip_2_vision_model'
def __init__( self , _a=1_408 , _a=6_144 , _a=39 , _a=16 , _a=224 , _a=14 , _a="gelu" , _a=0.0_0001 , _a=0.0 , _a=1E-10 , _a=True , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = patch_size
__a = image_size
__a = initializer_range
__a = attention_dropout
__a = layer_norm_eps
__a = hidden_act
__a = qkv_bias
@classmethod
def __UpperCAmelCase ( cls , _a , **_a ):
cls._set_token_in_kwargs(_a )
__a , __a = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
__a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'blip_2_qformer'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=0.02 , _a=1E-12 , _a=0 , _a="absolute" , _a=2 , _a=1_408 , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = cross_attention_frequency
__a = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls , _a , **_a ):
cls._set_token_in_kwargs(_a )
__a , __a = cls.get_config_dict(_a , **_a )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
__a = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'blip-2'
__UpperCAmelCase : List[str] = True
def __init__( self , _a=None , _a=None , _a=None , _a=32 , **_a ):
super().__init__(**_a )
if vision_config is None:
__a = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
__a = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
__a = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
__a = BlipaVisionConfig(**_a )
__a = BlipaQFormerConfig(**_a )
__a = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
__a = CONFIG_MAPPING[text_model_type](**_a )
__a = self.text_config.tie_word_embeddings
__a = self.text_config.is_encoder_decoder
__a = num_query_tokens
__a = self.vision_config.hidden_size
__a = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__a = 1.0
__a = 0.02
@classmethod
def __UpperCAmelCase ( cls , _a , _a , _a , **_a , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_a , )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.vision_config.to_dict()
__a = self.qformer_config.to_dict()
__a = self.text_config.to_dict()
__a = self.__class__.model_type
return output
| 695 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : int , __snake_case : List[str] , __snake_case : Dict ):
# Initialise PyTorch model
_A = FunnelConfig.from_json_file(lowerCAmelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
_A = FunnelBaseModel(lowerCAmelCase__ ) if base_model else FunnelModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 107 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = 'deberta-v2'
def __init__( self , _a=128_100 , _a=1_536 , _a=24 , _a=24 , _a=6_144 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=0 , _a=0.02 , _a=1E-7 , _a=False , _a=-1 , _a=0 , _a=True , _a=None , _a=0 , _a="gelu" , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = relative_attention
__a = max_relative_positions
__a = pad_token_id
__a = position_biased_input
# Backwards compatibility
if type(_a ) == str:
__a = [x.strip() for x in pos_att_type.lower().split('''|''' )]
__a = pos_att_type
__a = vocab_size
__a = layer_norm_eps
__a = kwargs.get('''pooler_hidden_size''' , _a )
__a = pooler_dropout
__a = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __UpperCAmelCase ( self ):
return 12
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = -1 , _a = False , _a = None , _a = 3 , _a = 40 , _a = 40 , _a = None , ):
__a = super().generate_dummy_inputs(preprocessor=_a , framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 695 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __a ( __UpperCAmelCase , __UpperCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) as f:
a__ = json.load(lowerCAmelCase__ )
a__ = {}
a__ = []
a__ = []
for key, info in class_info.items():
a__ = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase__ ) )
a__ = thing_ids
a__ = class_names
return metadata
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=3_0 , SCREAMING_SNAKE_CASE=4_0_0 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE=1_0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=2_5_5 , SCREAMING_SNAKE_CASE="shi-labs/oneformer_demo" , SCREAMING_SNAKE_CASE="ade20k_panoptic.json" , SCREAMING_SNAKE_CASE=1_0 , ) -> str:
a__ = parent
a__ = batch_size
a__ = num_channels
a__ = min_resolution
a__ = max_resolution
a__ = do_resize
a__ = {'''shortest_edge''': 3_2, '''longest_edge''': 1_3_3_3} if size is None else size
a__ = do_normalize
a__ = image_mean
a__ = image_std
a__ = class_info_file
a__ = prepare_metadata(_a , _a )
a__ = num_text
a__ = repo_path
# for the post_process_functions
a__ = 2
a__ = 1_0
a__ = 1_0
a__ = 3
a__ = 4
a__ = num_labels
a__ = do_reduce_labels
a__ = ignore_index
def _UpperCAmelCase ( self ) -> Optional[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
if not batched:
a__ = image_inputs[0]
if isinstance(_a , Image.Image ):
a__ , a__ = image.size
else:
a__ , a__ = image.shape[1], image.shape[2]
if w < h:
a__ = int(self.size['''shortest_edge'''] * h / w )
a__ = self.size['''shortest_edge''']
elif w > h:
a__ = self.size['''shortest_edge''']
a__ = int(self.size['''shortest_edge'''] * w / h )
else:
a__ = self.size['''shortest_edge''']
a__ = self.size['''shortest_edge''']
else:
a__ = []
for image in image_inputs:
a__ , a__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a__ = max(_a , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0]
a__ = max(_a , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
def _UpperCAmelCase ( self ) -> Optional[Any]:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_lowercase : Optional[int] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_lowercase : Union[str, Any] = image_processing_class
def _UpperCAmelCase ( self ) -> Union[str, Any]:
a__ = OneFormerImageProcessorTester(self )
@property
def _UpperCAmelCase ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Any:
a__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''ignore_index''' ) )
self.assertTrue(hasattr(_a , '''class_info_file''' ) )
self.assertTrue(hasattr(_a , '''num_text''' ) )
self.assertTrue(hasattr(_a , '''repo_path''' ) )
self.assertTrue(hasattr(_a , '''metadata''' ) )
self.assertTrue(hasattr(_a , '''do_reduce_labels''' ) )
def _UpperCAmelCase ( self ) -> str:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processor
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
a__ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
a__ , a__ = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ , a__ = self.image_processing_tester.get_expected_values(_a , batched=_a )
a__ = image_processor(
_a , ['''semantic'''] * len(_a ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ) -> Any:
# Initialize image_processor
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
a__ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
a__ , a__ = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ , a__ = self.image_processing_tester.get_expected_values(_a , batched=_a )
a__ = image_processor(
_a , ['''semantic'''] * len(_a ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self ) -> List[str]:
# Initialize image_processor
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
a__ = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
a__ , a__ = self.image_processing_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
a__ , a__ = self.image_processing_tester.get_expected_values(_a , batched=_a )
a__ = image_processor(
_a , ['''semantic'''] * len(_a ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="np" ) -> Optional[int]:
a__ = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
a__ = self.image_processing_tester.num_labels
a__ = None
a__ = None
a__ = prepare_image_inputs(self.image_processing_tester , equal_resolution=_a )
if with_segmentation_maps:
a__ = num_labels
if is_instance_map:
a__ = list(range(_a ) ) * 2
a__ = dict(enumerate(_a ) )
a__ = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
a__ = [Image.fromarray(_a ) for annotation in annotations]
a__ = image_processor(
_a , ['''semantic'''] * len(_a ) , _a , return_tensors='''pt''' , instance_id_to_semantic_id=_a , pad_and_return_pixel_mask=_a , )
return inputs
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> Any:
def common(SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=None ):
a__ = self.comm_get_image_processor_inputs(
with_segmentation_maps=_a , is_instance_map=_a , segmentation_type=_a )
a__ = inputs['''mask_labels''']
a__ = inputs['''class_labels''']
a__ = inputs['''pixel_values''']
a__ = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(_a , _a , _a ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_a ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_a )
common(is_instance_map=_a , segmentation_type='''pil''' )
common(is_instance_map=_a , segmentation_type='''pil''' )
def _UpperCAmelCase ( self ) -> int:
a__ = np.zeros((2_0, 5_0) )
a__ = 1
a__ = 1
a__ = 1
a__ = binary_mask_to_rle(_a )
self.assertEqual(len(_a ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
def _UpperCAmelCase ( self ) -> Tuple:
a__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
a__ = self.image_processing_tester.get_fake_oneformer_outputs()
a__ = fature_extractor.post_process_semantic_segmentation(_a )
self.assertEqual(len(_a ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
a__ = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
a__ = fature_extractor.post_process_semantic_segmentation(_a , target_sizes=_a )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def _UpperCAmelCase ( self ) -> Tuple:
a__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
a__ = self.image_processing_tester.get_fake_oneformer_outputs()
a__ = image_processor.post_process_instance_segmentation(_a , threshold=0 )
self.assertTrue(len(_a ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _a )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def _UpperCAmelCase ( self ) -> Dict:
a__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
a__ = self.image_processing_tester.get_fake_oneformer_outputs()
a__ = image_processor.post_process_panoptic_segmentation(_a , threshold=0 )
self.assertTrue(len(_a ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , _a )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 194 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None:
__a = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ):
__a , __a , __a = requirement, None, None
else:
__a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
__a , __a = match[0]
__a = want_full.split(''',''' ) # there could be multiple requirements
__a = {}
for w in want_range:
__a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
__a , __a = match[0]
__a = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return
# check if any version is installed
try:
__a = importlib.metadata.version(lowerCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
__a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
| 695 | 0 |
'''simple docstring'''
import math
def __A ( UpperCAmelCase ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(lowerCAmelCase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __A ( UpperCAmelCase = 1_0_0_0_1 ) -> int:
'''simple docstring'''
try:
_UpperCamelCase : Optional[int] = int(lowerCAmelCase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Dict = 2
while len(lowerCAmelCase__ ) < nth:
if is_prime(lowerCAmelCase__ ):
primes.append(lowerCAmelCase__ )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase__ ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 435 |
"""simple docstring"""
from __future__ import annotations
lowercase_ = list[tuple[int, int]]
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , ):
__a = pos_x
__a = pos_y
__a = (pos_y, pos_x)
__a = goal_x
__a = goal_y
__a = g_cost
__a = parent
__a = self.calculate_heuristic()
def __UpperCAmelCase ( self ):
__a = abs(self.pos_x - self.goal_x )
__a = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _a ):
return self.f_cost < other.f_cost
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
__a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
__a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _a )
__a = [self.start]
__a = []
__a = False
def __UpperCAmelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__a = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__a = True
return self.retrace_path(_a )
self.closed_nodes.append(_a )
__a = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
__a = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
if not self.reached:
return [self.start.pos]
return None
def __UpperCAmelCase ( self , _a ):
__a = []
for action in delta:
__a = parent.pos_x + action[1]
__a = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def __UpperCAmelCase ( self , _a ):
__a = node
__a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__a = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowercase_ = (0, 0)
lowercase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase_ = GreedyBestFirst(init, goal)
lowercase_ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase_ = 2
for elem in grid:
print(elem)
| 695 | 0 |
'''simple docstring'''
import math
def lowercase__( _UpperCamelCase : int )-> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__( _UpperCamelCase : float = 0.1 )-> int:
"""simple docstring"""
_UpperCamelCase = 3
_UpperCamelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(lowerCAmelCase__ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str ) -> List[Any]:
# Initialise PyTorch model
__a = RemBertConfig.from_json_file(lowerCAmelCase__ )
print('''Building PyTorch model from configuration: {}'''.format(str(lowerCAmelCase__ ) ) )
__a = RemBertModel(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(lowerCAmelCase__ ) )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 695 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
UpperCAmelCase_ : Tuple = list[tuple[int, int]]
UpperCAmelCase_ : Union[str, Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase_ : Optional[int] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pos_x
_SCREAMING_SNAKE_CASE =pos_y
_SCREAMING_SNAKE_CASE =(pos_y, pos_x)
_SCREAMING_SNAKE_CASE =goal_x
_SCREAMING_SNAKE_CASE =goal_y
_SCREAMING_SNAKE_CASE =parent
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =Node(start[1] , start[0] , goal[1] , goal[0] , _a )
_SCREAMING_SNAKE_CASE =Node(goal[1] , goal[0] , goal[1] , goal[0] , _a )
_SCREAMING_SNAKE_CASE =[self.start]
_SCREAMING_SNAKE_CASE =False
def UpperCamelCase_ ( self ):
'''simple docstring'''
while self.node_queue:
_SCREAMING_SNAKE_CASE =self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_SCREAMING_SNAKE_CASE =True
return self.retrace_path(_a )
_SCREAMING_SNAKE_CASE =self.get_successors(_a )
for node in successors:
self.node_queue.append(_a )
if not self.reached:
return [self.start.pos]
return None
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[]
for action in delta:
_SCREAMING_SNAKE_CASE =parent.pos_x + action[1]
_SCREAMING_SNAKE_CASE =parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_a , _a , self.target.pos_y , self.target.pos_x , _a ) )
return successors
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =node
_SCREAMING_SNAKE_CASE =[]
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_SCREAMING_SNAKE_CASE =current_node.parent
path.reverse()
return path
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BreadthFirstSearch(_a , _a )
_SCREAMING_SNAKE_CASE =BreadthFirstSearch(_a , _a )
_SCREAMING_SNAKE_CASE =False
def UpperCamelCase_ ( self ):
'''simple docstring'''
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_SCREAMING_SNAKE_CASE =self.fwd_bfs.node_queue.pop(0 )
_SCREAMING_SNAKE_CASE =self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_SCREAMING_SNAKE_CASE =True
return self.retrace_bidirectional_path(
_a , _a )
_SCREAMING_SNAKE_CASE =current_bwd_node
_SCREAMING_SNAKE_CASE =current_fwd_node
_SCREAMING_SNAKE_CASE ={
self.fwd_bfs: self.fwd_bfs.get_successors(_a ),
self.bwd_bfs: self.bwd_bfs.get_successors(_a ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_a )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCamelCase_ ( self , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.fwd_bfs.retrace_path(_a )
_SCREAMING_SNAKE_CASE =self.bwd_bfs.retrace_path(_a )
bwd_path.pop()
bwd_path.reverse()
_SCREAMING_SNAKE_CASE =fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = (0, 0)
UpperCAmelCase_ : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase_ : List[str] = time.time()
UpperCAmelCase_ : Union[str, Any] = BreadthFirstSearch(init, goal)
UpperCAmelCase_ : Union[str, Any] = bfs.search()
UpperCAmelCase_ : List[Any] = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
UpperCAmelCase_ : str = time.time()
UpperCAmelCase_ : Tuple = BidirectionalBreadthFirstSearch(init, goal)
UpperCAmelCase_ : List[Any] = bd_bfs.search()
UpperCAmelCase_ : str = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 255 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id='''test-model-flax''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def __UpperCAmelCase ( self ):
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_a )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_a , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=f'''{key} not identical''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Optional[int]:
__a = True
__a = flatten_dict(modela.params )
__a = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
__a = False
return models_are_equal
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_a )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size='''10KB''' )
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_a ):
__a = FlaxBertModel.from_pretrained(_a )
__a = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
| 695 | 0 |
'''simple docstring'''
import math
def lowerCamelCase__ ( __lowercase ):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case : List[str] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(lowerCAmelCase__ )
if number < 1:
snake_case : Optional[Any] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(lowerCAmelCase__ )
elif number == 1:
return 3
elif number == 2:
return 5
else:
snake_case : Any = int(math.log(number // 3 , 2 ) ) + 2
snake_case : List[Any] = [3, 5]
snake_case : List[str] = 2
snake_case : str = 3
for block in range(1 , lowerCAmelCase__ ):
for _ in range(lowerCAmelCase__ ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
lowercase : Tuple = 0
try:
lowercase : Optional[int] = proth(number)
except ValueError:
print(F"""ValueError: there is no {number}th Proth number""")
continue
print(F"""The {number}th Proth number: {value}""")
| 116 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = DownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
def __UpperCAmelCase ( self ):
__a = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetDownsampleBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = CrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SkipDownBlockaD # noqa F405
__UpperCAmelCase : Tuple = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = AttnSkipDownBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = DownEncoderBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaD # noqa F405
__UpperCAmelCase : Any = 'mid'
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaDCrossAttn # noqa F405
__UpperCAmelCase : str = 'mid'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCAmelCase : List[Any] = 'mid'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetUpsampleBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = CrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = AttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = SkipUpBlockaD # noqa F405
__UpperCAmelCase : str = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpDecoderBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_a )
| 695 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Union[str, Any] = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
snake_case_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 595 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase_ = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = 'maskformer'
__UpperCAmelCase : Optional[int] = {'hidden_size': 'mask_feature_size'}
__UpperCAmelCase : Any = ['resnet', 'swin']
__UpperCAmelCase : Dict = ['detr']
def __init__( self , _a = 256 , _a = 256 , _a = 0.1 , _a = False , _a = None , _a = None , _a = 0.02 , _a = 1.0 , _a = 1.0 , _a = 1.0 , _a = 20.0 , _a = None , **_a , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__a = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_a , _a ):
__a = backbone_config.pop('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_a )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__a = DetrConfig()
else:
# verify that the decoder is supported
__a = (
decoder_config.pop('''model_type''' ) if isinstance(_a , _a ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(_a , _a ):
__a = CONFIG_MAPPING[decoder_type]
__a = config_class.from_dict(_a )
__a = backbone_config
__a = decoder_config
# main feature dimension for the model
__a = fpn_feature_size
__a = mask_feature_size
# initializer
__a = init_std
__a = init_xavier_std
# Hungarian matcher && loss
__a = cross_entropy_weight
__a = dice_weight
__a = mask_weight
__a = use_auxiliary_loss
__a = no_object_weight
__a = output_auxiliary_logits
__a = self.decoder_config.encoder_attention_heads
__a = self.decoder_config.num_hidden_layers
super().__init__(**_a )
@classmethod
def __UpperCAmelCase ( cls , _a , _a , **_a ):
return cls(
backbone_config=_a , decoder_config=_a , **_a , )
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.decoder_config.to_dict()
__a = self.__class__.model_type
return output
| 695 | 0 |
def lowerCAmelCase_ ( snake_case_ = 10**9 ):
_A : List[Any] = 1
_A : Any = 2
_A : Tuple = 0
_A : Tuple = 0
_A : Optional[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
_A : Optional[int] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 307 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowercase_ = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def lowercase ( lowerCAmelCase__ : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
__a = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__a = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__a = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 695 | 0 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
SCREAMING_SNAKE_CASE_ = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
SCREAMING_SNAKE_CASE_ = logging.WARNING
def lowercase__ ( ) -> Any:
"""simple docstring"""
UpperCAmelCase = os.getenv('DATASETS_VERBOSITY' , lowerCAmelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"Unknown option DATASETS_VERBOSITY={env_level_str}, "
F"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def lowercase__ ( ) -> str:
"""simple docstring"""
return __name__.split('.' )[0]
def lowercase__ ( ) -> logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def lowercase__ ( ) -> None:
"""simple docstring"""
UpperCAmelCase = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase__ ( ) -> None:
"""simple docstring"""
UpperCAmelCase = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase__ ( lowerCAmelCase : Optional[str] = None ) -> logging.Logger:
"""simple docstring"""
if name is None:
UpperCAmelCase = _get_library_name()
return logging.getLogger(lowerCAmelCase__ )
def lowercase__ ( ) -> int:
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def lowercase__ ( lowerCAmelCase : int ) -> None:
"""simple docstring"""
_get_library_root_logger().setLevel(lowerCAmelCase__ )
def lowercase__ ( ) -> Tuple:
"""simple docstring"""
return set_verbosity(lowerCAmelCase__ )
def lowercase__ ( ) -> Union[str, Any]:
"""simple docstring"""
return set_verbosity(lowerCAmelCase__ )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
return set_verbosity(lowerCAmelCase__ )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
return set_verbosity(lowerCAmelCase__ )
def lowercase__ ( ) -> None:
"""simple docstring"""
UpperCAmelCase = False
def lowercase__ ( ) -> None:
"""simple docstring"""
UpperCAmelCase = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class _UpperCAmelCase :
def __init__( self , *lowercase_ , **lowercase_ ) -> Optional[int]: # pylint: disable=unused-argument
UpperCAmelCase = args[0] if args else None
def __iter__( self ) -> Dict:
return iter(self._iterator )
def __getattr__( self , lowercase_ ) -> Optional[Any]:
def empty_fn(*lowercase_ , **lowercase_ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Any:
return self
def __exit__( self , lowercase_ , lowercase_ , lowercase_ ) -> Any:
return
SCREAMING_SNAKE_CASE_ = True
class _UpperCAmelCase :
def __call__( self , *lowercase_ , lowercase_=False , **lowercase_ ) -> List[str]:
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_a , **_a )
else:
return EmptyTqdm(*_a , **_a )
def a_ ( self , *lowercase_ , **lowercase_ ) -> Optional[Any]:
UpperCAmelCase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_a , **_a )
def a_ ( self ) -> Union[str, Any]:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
SCREAMING_SNAKE_CASE_ = _tqdm_cls()
def lowercase__ ( ) -> bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def lowercase__ ( ) -> Dict:
"""simple docstring"""
global _tqdm_active
UpperCAmelCase = True
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
global _tqdm_active
UpperCAmelCase = False
| 373 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'gpt_bigcode'
__UpperCAmelCase : Tuple = ['past_key_values']
__UpperCAmelCase : Dict = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _a=50_257 , _a=1_024 , _a=768 , _a=12 , _a=12 , _a=None , _a="gelu_pytorch_tanh" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.02 , _a=True , _a=True , _a=50_256 , _a=50_256 , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = n_positions
__a = n_embd
__a = n_layer
__a = n_head
__a = n_inner
__a = activation_function
__a = resid_pdrop
__a = embd_pdrop
__a = attn_pdrop
__a = layer_norm_epsilon
__a = initializer_range
__a = scale_attn_weights
__a = use_cache
__a = attention_softmax_in_fpaa
__a = scale_attention_softmax_in_fpaa
__a = multi_query
__a = bos_token_id
__a = eos_token_id
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
| 695 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE__ : str = '''ViTImageProcessor'''
SCREAMING_SNAKE_CASE__ : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self :List[str] , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Optional[Any]=None , **lowerCAmelCase__ :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : str = kwargs.pop('''feature_extractor''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self :Tuple , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :str ) -> Any:
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if visual_prompt is not None:
__SCREAMING_SNAKE_CASE : Any = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
__SCREAMING_SNAKE_CASE : str = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if visual_prompt is not None and images is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
__SCREAMING_SNAKE_CASE : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def __magic_name__( self :Any , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :int ) -> Any:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :List[str] , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :Union[str, Any] ) -> Union[str, Any]:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__( self :Optional[int] ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase__ , )
return self.image_processor_class
@property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase__ , )
return self.image_processor
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = False
while is_sorted is False: # Until all the indices are traversed keep looping
__SCREAMING_SNAKE_CASE : Dict = True
for i in range(0 , len(lowercase__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = input_list[i + 1], input_list[i]
# swapping if elements not in order
__SCREAMING_SNAKE_CASE : Optional[int] = False
for i in range(1 , len(lowercase__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
__SCREAMING_SNAKE_CASE : Dict = False
return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
__lowerCAmelCase : Tuple =[int(x) for x in input().split()]
# inputing elements of the list in one line
__lowerCAmelCase : List[Any] =odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ShapEPipeline
SCREAMING_SNAKE_CASE__ : int = ['''prompt''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''prompt''']
SCREAMING_SNAKE_CASE__ : str = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE__ : List[Any] = False
@property
def __magic_name__( self :str ) -> int:
return 32
@property
def __magic_name__( self :str ) -> int:
return 32
@property
def __magic_name__( self :Tuple ) -> str:
return self.time_input_dim * 4
@property
def __magic_name__( self :Any ) -> Optional[int]:
return 8
@property
def __magic_name__( self :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __magic_name__( self :Any ) -> Any:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(lowerCAmelCase__ )
@property
def __magic_name__( self :Dict ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[str] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__SCREAMING_SNAKE_CASE : Tuple = PriorTransformer(**lowerCAmelCase__ )
return model
@property
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = ShapERenderer(**lowerCAmelCase__ )
return model
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_prior
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : Dict = self.dummy_tokenizer
__SCREAMING_SNAKE_CASE : Any = self.dummy_renderer
__SCREAMING_SNAKE_CASE : Tuple = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=lowerCAmelCase__ , clip_sample=lowerCAmelCase__ , clip_sample_range=1.0 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__( self :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int=0 ) -> Union[str, Any]:
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : str = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu'''
__SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__( self :Optional[int] ) -> str:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__( self :Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE : Any = torch_device == '''cpu'''
__SCREAMING_SNAKE_CASE : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , )
def __magic_name__( self :int ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.pipeline_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
for key in inputs.keys():
if key in self.batch_params:
__SCREAMING_SNAKE_CASE : Optional[Any] = batch_size * [inputs[key]]
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(**lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
__SCREAMING_SNAKE_CASE : Tuple = ShapEPipeline.from_pretrained('''openai/shap-e''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(
'''a shark''' , generator=lowerCAmelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert'''
def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : Dict = position_embedding_type
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 696 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__SCREAMING_SNAKE_CASE : List[str] = flax_key_tuple[:-1] + ('''weight''',)
__SCREAMING_SNAKE_CASE : Tuple = torch.permute(lowercase__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase__ ):
# linear layer
__SCREAMING_SNAKE_CASE : int = flax_key_tuple[:-1] + ('''weight''',)
__SCREAMING_SNAKE_CASE : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__SCREAMING_SNAKE_CASE : Optional[Any] = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if "metadata" in layer:
__SCREAMING_SNAKE_CASE : Union[str, Any] = layer.split('''metadata''' )
__SCREAMING_SNAKE_CASE : Tuple = ''''''.join(split_layer[0] )[:-1]
__SCREAMING_SNAKE_CASE : Tuple = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
__SCREAMING_SNAKE_CASE : Optional[int] = layer.split('''kvstore''' )
__SCREAMING_SNAKE_CASE : List[str] = ''''''.join(split_layer[0] )[:-1]
__SCREAMING_SNAKE_CASE : Any = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
__SCREAMING_SNAKE_CASE : Tuple = layer.split('''/''' )
__SCREAMING_SNAKE_CASE : Optional[int] = '''/'''.join(split_layer[:-1] )
__SCREAMING_SNAKE_CASE : str = (split_layer[-1],)
if "kvstore/path" in layer:
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
__SCREAMING_SNAKE_CASE : Any = '''file'''
else:
__SCREAMING_SNAKE_CASE : Any = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = rename_keys(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = {}
for k, v in current_block.items():
__SCREAMING_SNAKE_CASE : Any = v
__SCREAMING_SNAKE_CASE : Union[str, Any] = new_current_block
torch.save(lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = WEIGHTS_NAME ):
__SCREAMING_SNAKE_CASE : Dict = convert_file_size_to_int(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = []
__SCREAMING_SNAKE_CASE : Tuple = {}
__SCREAMING_SNAKE_CASE : Tuple = 0
__SCREAMING_SNAKE_CASE : Tuple = 0
os.makedirs(lowercase__ , exist_ok=lowercase__ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Dict = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
__SCREAMING_SNAKE_CASE : Any = flatten_dict(lowercase__ , sep='''/''' )
__SCREAMING_SNAKE_CASE : int = {}
for layer in checkpoint_info.keys():
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_key_and_tensorstore_dict(
lowercase__ , lowercase__ , lowercase__ )
if curr_real_layer_name in all_layers:
__SCREAMING_SNAKE_CASE : int = content
else:
__SCREAMING_SNAKE_CASE : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__SCREAMING_SNAKE_CASE : List[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowercase__ )
__SCREAMING_SNAKE_CASE : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = rename_base_flax_keys(tuple(key.split('''/''' ) ) , lowercase__ )
__SCREAMING_SNAKE_CASE : str = '''/'''.join(lowercase__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
lowercase__ , weights_name.replace('''.bin''' , F'''-{len(lowercase__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowercase__ , lowercase__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
__SCREAMING_SNAKE_CASE : Any = {}
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : int = raw_weights.to(getattr(lowercase__ , lowercase__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowercase__ , weights_name.replace('''.bin''' , F'''-{len(lowercase__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowercase__ , lowercase__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__SCREAMING_SNAKE_CASE : Any = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for idx, shard in enumerate(lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = weights_name.replace(
'''.bin''' , F'''-{idx+1:05d}-of-{len(lowercase__ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
__SCREAMING_SNAKE_CASE : Any = os.path.join(lowercase__ , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowercase__ , os.path.join(lowercase__ , lowercase__ ) )
__SCREAMING_SNAKE_CASE : Dict = shard
for key in shard:
__SCREAMING_SNAKE_CASE : List[str] = shard_file
# Add the metadata
__SCREAMING_SNAKE_CASE : int = {'''total_size''': total_size}
__SCREAMING_SNAKE_CASE : Tuple = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(lowercase__ , lowercase__ ) , '''w''' , encoding='''utf-8''' ) as f:
__SCREAMING_SNAKE_CASE : List[str] = json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + '''\n'''
f.write(lowercase__ )
return metadata, index
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__lowerCAmelCase : Tuple =parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _UpperCamelCase ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__SCREAMING_SNAKE_CASE : int = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
__SCREAMING_SNAKE_CASE : List[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
__SCREAMING_SNAKE_CASE : List[Any] = TaTokenizer.from_pretrained('''t5-small''' )
__SCREAMING_SNAKE_CASE : Tuple = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowercase__ , return_tensors='''pt''' ).input_ids
__SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(lowercase__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 1 |
from math import pi, sqrt, tan
def _UpperCamelCase ( lowercase__ ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCamelCase ( lowercase__ ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def _UpperCamelCase ( lowercase__ ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
__SCREAMING_SNAKE_CASE : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(lowercase__ , 2 ) * torus_radius * tube_radius
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def _UpperCamelCase ( lowercase__ ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (sidea + sidea + sidea) / 2
__SCREAMING_SNAKE_CASE : Optional[int] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def _UpperCamelCase ( lowercase__ ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if not isinstance(lowercase__ , lowercase__ ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""")
print(f"""Square: {area_square(1_0) = }""")
print(f"""Triangle: {area_triangle(1_0, 1_0) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""")
print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""")
print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""")
print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""")
print(f"""Circle: {area_circle(2_0) = }""")
print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(2_0) = }""")
print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""")
print(f"""Sphere: {surface_area_sphere(2_0) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""")
print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""")
print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""")
print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""")
print(f"""Square: {area_reg_polygon(4, 1_0) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
| 696 |
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''bit'''
SCREAMING_SNAKE_CASE__ : Tuple = ['''preactivation''', '''bottleneck''']
SCREAMING_SNAKE_CASE__ : Any = ['''SAME''', '''VALID''']
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :List[Any]=64 , lowerCAmelCase__ :Optional[int]=[256, 512, 1_024, 2_048] , lowerCAmelCase__ :str=[3, 4, 6, 3] , lowerCAmelCase__ :Union[str, Any]="preactivation" , lowerCAmelCase__ :str="relu" , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :Optional[Any]=0.0 , lowerCAmelCase__ :str=False , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :Dict=1 , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Optional[int] , ) -> Union[str, Any]:
super().__init__(**lowerCAmelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__SCREAMING_SNAKE_CASE : Union[str, Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
__SCREAMING_SNAKE_CASE : str = num_channels
__SCREAMING_SNAKE_CASE : List[str] = embedding_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes
__SCREAMING_SNAKE_CASE : Tuple = depths
__SCREAMING_SNAKE_CASE : Optional[int] = layer_type
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = global_padding
__SCREAMING_SNAKE_CASE : Tuple = num_groups
__SCREAMING_SNAKE_CASE : int = drop_path_rate
__SCREAMING_SNAKE_CASE : Dict = embedding_dynamic_padding
__SCREAMING_SNAKE_CASE : Dict = output_stride
__SCREAMING_SNAKE_CASE : Union[str, Any] = width_factor
__SCREAMING_SNAKE_CASE : int = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase__ ) + 1 )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
def _UpperCamelCase ( lowercase__ = 100 ):
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : str = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None
def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
| 696 | 1 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = torch.exp(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = torch.sum(lowercase__ , dim=1 ) # sum of exp(x_i)
__SCREAMING_SNAKE_CASE : str = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(lowercase__ ) - B / A
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :Tuple ) -> Optional[int]:
super().__init__()
__SCREAMING_SNAKE_CASE : List[Any] = config.output_attentions
__SCREAMING_SNAKE_CASE : int = config.output_hidden_states
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList([BertLayer(lowerCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
__SCREAMING_SNAKE_CASE : List[str] = nn.ModuleList([BertHighway(lowerCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
__SCREAMING_SNAKE_CASE : List[str] = [-1 for _ in range(config.num_hidden_layers )]
def __magic_name__( self :int , lowerCAmelCase__ :Dict ) -> Union[str, Any]:
if (type(lowerCAmelCase__ ) is float) or (type(lowerCAmelCase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = x
else:
__SCREAMING_SNAKE_CASE : Optional[int] = x
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Any ) -> List[str]:
__SCREAMING_SNAKE_CASE : str = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Any=None , ) -> List[str]:
__SCREAMING_SNAKE_CASE : List[str] = ()
__SCREAMING_SNAKE_CASE : Tuple = ()
__SCREAMING_SNAKE_CASE : int = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__SCREAMING_SNAKE_CASE : List[Any] = all_hidden_states + (hidden_states,)
__SCREAMING_SNAKE_CASE : Any = layer_module(
lowerCAmelCase__ , lowerCAmelCase__ , head_mask[i] , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = layer_outputs[0]
if self.output_attentions:
__SCREAMING_SNAKE_CASE : str = all_attentions + (layer_outputs[1],)
__SCREAMING_SNAKE_CASE : Any = (hidden_states,)
if self.output_hidden_states:
__SCREAMING_SNAKE_CASE : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
__SCREAMING_SNAKE_CASE : Any = current_outputs + (all_attentions,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.highway[i](lowerCAmelCase__ )
# logits, pooled_output
if not self.training:
__SCREAMING_SNAKE_CASE : Union[str, Any] = highway_exit[0]
__SCREAMING_SNAKE_CASE : Dict = entropy(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__SCREAMING_SNAKE_CASE : Optional[int] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__SCREAMING_SNAKE_CASE : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowerCAmelCase__ , i + 1 )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__SCREAMING_SNAKE_CASE : Optional[int] = all_hidden_states + (hidden_states,)
__SCREAMING_SNAKE_CASE : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
__SCREAMING_SNAKE_CASE : Tuple = outputs + (all_hidden_states,)
if self.output_attentions:
__SCREAMING_SNAKE_CASE : int = outputs + (all_attentions,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , A__ , )
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :List[str] ) -> int:
super().__init__(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = config
__SCREAMING_SNAKE_CASE : int = BertEmbeddings(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = DeeBertEncoder(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = BertPooler(lowerCAmelCase__ )
self.init_weights()
def __magic_name__( self :int ) -> Any:
self.encoder.init_highway_pooler(self.pooler )
def __magic_name__( self :List[str] ) -> str:
return self.embeddings.word_embeddings
def __magic_name__( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = value
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[int] ) -> Any:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowerCAmelCase__ )
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[Any]=None , ) -> List[Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__SCREAMING_SNAKE_CASE : List[Any] = input_ids.size()
elif inputs_embeds is not None:
__SCREAMING_SNAKE_CASE : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__SCREAMING_SNAKE_CASE : List[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__SCREAMING_SNAKE_CASE : List[str] = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ )
if encoder_attention_mask is None:
__SCREAMING_SNAKE_CASE : Any = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ )
if token_type_ids is None:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.zeros(lowerCAmelCase__ , dtype=torch.long , device=lowerCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__SCREAMING_SNAKE_CASE : torch.Tensor = self.get_extended_attention_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__SCREAMING_SNAKE_CASE : Optional[Any] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__SCREAMING_SNAKE_CASE : Optional[Any] = encoder_attention_mask[:, None, None, :]
__SCREAMING_SNAKE_CASE : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__SCREAMING_SNAKE_CASE : Union[str, Any] = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_head_mask(lowerCAmelCase__ , self.config.num_hidden_layers )
__SCREAMING_SNAKE_CASE : Tuple = self.embeddings(
input_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = self.encoder(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[str] = encoder_outputs[0]
__SCREAMING_SNAKE_CASE : int = self.pooler(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = message
__SCREAMING_SNAKE_CASE : Optional[int] = exit_layer # start from 1!
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :Union[str, Any] ) -> Dict:
super().__init__()
__SCREAMING_SNAKE_CASE : str = BertPooler(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = nn.Dropout(config.hidden_dropout_prob )
__SCREAMING_SNAKE_CASE : List[str] = nn.Linear(config.hidden_size , config.num_labels )
def __magic_name__( self :Any , lowerCAmelCase__ :List[Any] ) -> List[str]:
# Pooler
__SCREAMING_SNAKE_CASE : str = encoder_outputs[0]
__SCREAMING_SNAKE_CASE : Optional[int] = self.pooler(lowerCAmelCase__ )
# "return" pooler_output
# BertModel
__SCREAMING_SNAKE_CASE : List[str] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__SCREAMING_SNAKE_CASE : int = bmodel_output[1]
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dropout(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.classifier(lowerCAmelCase__ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , A__ , )
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Dict , lowerCAmelCase__ :Tuple ) -> Dict:
super().__init__(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = config.num_labels
__SCREAMING_SNAKE_CASE : int = config.num_hidden_layers
__SCREAMING_SNAKE_CASE : str = DeeBertModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
__SCREAMING_SNAKE_CASE : int = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
def __magic_name__( self :int , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[Any]=-1 , lowerCAmelCase__ :Any=False , ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = self.num_layers
try:
__SCREAMING_SNAKE_CASE : Dict = self.bert(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__SCREAMING_SNAKE_CASE : Any = outputs[1]
__SCREAMING_SNAKE_CASE : Any = self.dropout(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.classifier(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__SCREAMING_SNAKE_CASE : str = e.message
__SCREAMING_SNAKE_CASE : Optional[Any] = e.exit_layer
__SCREAMING_SNAKE_CASE : str = outputs[0]
if not self.training:
__SCREAMING_SNAKE_CASE : str = entropy(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : int = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__SCREAMING_SNAKE_CASE : int = MSELoss()
__SCREAMING_SNAKE_CASE : str = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossEntropyLoss()
__SCREAMING_SNAKE_CASE : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for highway_exit in outputs[-1]:
__SCREAMING_SNAKE_CASE : str = highway_exit[0]
if not self.training:
highway_logits_all.append(lowerCAmelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__SCREAMING_SNAKE_CASE : Optional[int] = MSELoss()
__SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossEntropyLoss()
__SCREAMING_SNAKE_CASE : str = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowerCAmelCase__ )
if train_highway:
__SCREAMING_SNAKE_CASE : Optional[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__SCREAMING_SNAKE_CASE : List[Any] = (loss,) + outputs
if not self.training:
__SCREAMING_SNAKE_CASE : str = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__SCREAMING_SNAKE_CASE : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : str = "text"
SCREAMING_SNAKE_CASE__ : str = "summary"
@property
def __magic_name__( self :Union[str, Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 696 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
__lowerCAmelCase : int ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''swin'''
SCREAMING_SNAKE_CASE__ : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :str , lowerCAmelCase__ :Optional[int]=224 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :List[str]=96 , lowerCAmelCase__ :Union[str, Any]=[2, 2, 6, 2] , lowerCAmelCase__ :Dict=[3, 6, 12, 24] , lowerCAmelCase__ :List[Any]=7 , lowerCAmelCase__ :Tuple=4.0 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :Tuple=0.0 , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :List[str]=0.02 , lowerCAmelCase__ :List[str]=1E-5 , lowerCAmelCase__ :Tuple=32 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[str]=None , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
super().__init__(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = image_size
__SCREAMING_SNAKE_CASE : Any = patch_size
__SCREAMING_SNAKE_CASE : List[str] = num_channels
__SCREAMING_SNAKE_CASE : Optional[int] = embed_dim
__SCREAMING_SNAKE_CASE : Union[str, Any] = depths
__SCREAMING_SNAKE_CASE : str = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = num_heads
__SCREAMING_SNAKE_CASE : str = window_size
__SCREAMING_SNAKE_CASE : List[Any] = mlp_ratio
__SCREAMING_SNAKE_CASE : Optional[Any] = qkv_bias
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
__SCREAMING_SNAKE_CASE : Dict = hidden_act
__SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
__SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__SCREAMING_SNAKE_CASE : Optional[Any] = int(embed_dim * 2 ** (len(lowerCAmelCase__ ) - 1) )
__SCREAMING_SNAKE_CASE : Tuple = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase__ ) + 1 )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = version.parse('''1.11''' )
@property
def __magic_name__( self :int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __magic_name__( self :List[str] ) -> float:
return 1E-4
| 696 |
def _UpperCamelCase ( lowercase__ = 10**9 ):
__SCREAMING_SNAKE_CASE : List[str] = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Dict = 0
__SCREAMING_SNAKE_CASE : Any = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__SCREAMING_SNAKE_CASE : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
__lowerCAmelCase : Any ='__DUMMY_TRANSFORMERS_USER__'
__lowerCAmelCase : str ='Dummy User'
__lowerCAmelCase : List[str] ='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'
__lowerCAmelCase : List[Any] ='https://hub-ci.huggingface.co'
__lowerCAmelCase : Optional[Any] =CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
__lowerCAmelCase : Dict =CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
__lowerCAmelCase : List[Any] =Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def _UpperCamelCase ( lowercase__ ):
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , lowercase__ )
@pytest.fixture
def _UpperCamelCase ( lowercase__ ):
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , lowercase__ )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , lowercase__ )
@pytest.fixture
def _UpperCamelCase ( lowercase__ ):
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , lowercase__ )
@pytest.fixture
def _UpperCamelCase ( lowercase__ , lowercase__ ):
HfFolder.save_token(lowercase__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def _UpperCamelCase ( ):
return HfApi(endpoint=lowercase__ )
@pytest.fixture(scope='''session''' )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = HfFolder.get_token()
HfFolder.save_token(lowercase__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowercase__ )
@pytest.fixture
def _UpperCamelCase ( lowercase__ ):
def _cleanup_repo(lowercase__ ):
hf_api.delete_repo(lowercase__ , token=lowercase__ , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def _UpperCamelCase ( lowercase__ ):
@contextmanager
def _temporary_repo(lowercase__ ):
try:
yield repo_id
finally:
cleanup_repo(lowercase__ )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : int = F'''repo_txt_data-{int(time.time() * 10e3 )}'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowercase__ , token=lowercase__ , repo_type='''dataset''' , private=lowercase__ )
hf_api.upload_file(
token=lowercase__ , path_or_fileobj=str(lowercase__ ) , path_in_repo='''data/text_data.txt''' , repo_id=lowercase__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(lowercase__ , token=lowercase__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
__SCREAMING_SNAKE_CASE : Dict = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowercase__ , token=lowercase__ , repo_type='''dataset''' , private=lowercase__ )
hf_api.upload_file(
token=lowercase__ , path_or_fileobj=str(lowercase__ ) , path_in_repo='''data.zip''' , repo_id=lowercase__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(lowercase__ , token=lowercase__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
__SCREAMING_SNAKE_CASE : Optional[int] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowercase__ , token=lowercase__ , repo_type='''dataset''' , private=lowercase__ )
hf_api.upload_file(
token=lowercase__ , path_or_fileobj=str(lowercase__ ) , path_in_repo='''data.zip''' , repo_id=lowercase__ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(lowercase__ , token=lowercase__ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
return hf_private_dataset_repo_zipped_img_data_
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__SCREAMING_SNAKE_CASE : str = True
for i in range(lowercase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
if a[i].islower():
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(lowercase__ ) * abs(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int:
if return_pvalue:
__SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
| 696 | 1 |
def _UpperCamelCase ( lowercase__ = 3 , lowercase__ = 7 , lowercase__ = 1000000 ):
__SCREAMING_SNAKE_CASE : Tuple = 0
__SCREAMING_SNAKE_CASE : Dict = 1
for current_denominator in range(1 , limit + 1 ):
__SCREAMING_SNAKE_CASE : Dict = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__SCREAMING_SNAKE_CASE : Optional[Any] = current_numerator
__SCREAMING_SNAKE_CASE : Tuple = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 696 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase ( lowercase__ ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(lowercase__ , 0 , lowercase__ , args=(lowercase__) )[0]
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return math.pow(lowercase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 |
import os
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = len(grid[0] )
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Dict = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(lowercase__ ):
for j in range(n_rows - 3 ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__SCREAMING_SNAKE_CASE : Optional[int] = max(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if max_product > largest:
__SCREAMING_SNAKE_CASE : Tuple = max_product
return largest
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[int] = []
with open(os.path.dirname(lowercase__ ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
__SCREAMING_SNAKE_CASE : str = [[int(lowercase__ ) for i in grid[j]] for j in range(len(lowercase__ ) )]
return largest_product(lowercase__ )
if __name__ == "__main__":
print(solution())
| 696 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__lowerCAmelCase : List[Any] =[
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def _UpperCamelCase ( lowercase__=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Tuple = None
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : Union[str, Any] = dataset_module_factory(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : DatasetBuilder = builder_cls(
cache_dir=lowerCAmelCase__ , config_name=lowerCAmelCase__ , hash=dataset_module.hash , )
__SCREAMING_SNAKE_CASE : int = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase__ ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
__SCREAMING_SNAKE_CASE : Dict = cached_path(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ )
self.assertTrue(os.path.exists(lowerCAmelCase__ ) )
@pytest.mark.integration
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
__SCREAMING_SNAKE_CASE : Dict = dataset_module_factory('''wikipedia''' , cache_dir=lowercase__ )
__SCREAMING_SNAKE_CASE : str = import_main_class(dataset_module.module_path )
__SCREAMING_SNAKE_CASE : DatasetBuilder = builder_cls(
cache_dir=lowercase__ , config_name='''20220301.frr''' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__SCREAMING_SNAKE_CASE : List[str] = None
builder_instance.download_and_prepare()
__SCREAMING_SNAKE_CASE : Dict = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = dataset_module_factory('''wikipedia''' , cache_dir=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = import_main_class(dataset_module.module_path , dataset=lowercase__ )
__SCREAMING_SNAKE_CASE : DatasetBuilder = builder_cls(
cache_dir=lowercase__ , config_name='''20220301.frr''' , hash=dataset_module.hash , )
__SCREAMING_SNAKE_CASE : Optional[Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(lowercase__ , lowercase__ )
assert "train" in ds
assert isinstance(ds['''train'''] , lowercase__ )
assert next(iter(ds['''train'''] ) )
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 696 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[PIL.Image.Image], np.ndarray]
SCREAMING_SNAKE_CASE__ : Optional[List[bool]]
SCREAMING_SNAKE_CASE__ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
__SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 696 | 1 |
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = len(lowercase__ )
while cur > 1:
# Find the maximum number in arr
__SCREAMING_SNAKE_CASE : Tuple = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__SCREAMING_SNAKE_CASE : Optional[Any] = arr[mi::-1] + arr[mi + 1 : len(lowercase__ )]
# Reverse whole list
__SCREAMING_SNAKE_CASE : Union[str, Any] = arr[cur - 1 :: -1] + arr[cur : len(lowercase__ )]
cur -= 1
return arr
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : Optional[Any] =[int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''canine'''
def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : int = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
# Character config:
__SCREAMING_SNAKE_CASE : Tuple = downsampling_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size
__SCREAMING_SNAKE_CASE : Any = num_hash_functions
__SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets
__SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
| 696 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''mems''']
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = []
self.cutoffs.extend(lowerCAmelCase__ )
if proj_share_all_but_first:
__SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs )
else:
__SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs )
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed
__SCREAMING_SNAKE_CASE : Tuple = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Optional[Any] = div_val
__SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm
__SCREAMING_SNAKE_CASE : List[str] = n_layer
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : str = mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = same_length
__SCREAMING_SNAKE_CASE : str = attn_type
__SCREAMING_SNAKE_CASE : Dict = clamp_len
__SCREAMING_SNAKE_CASE : Tuple = sample_softmax
__SCREAMING_SNAKE_CASE : Optional[int] = adaptive
__SCREAMING_SNAKE_CASE : int = dropout
__SCREAMING_SNAKE_CASE : Optional[Any] = dropatt
__SCREAMING_SNAKE_CASE : int = untie_r
__SCREAMING_SNAKE_CASE : Optional[int] = init
__SCREAMING_SNAKE_CASE : List[str] = init_range
__SCREAMING_SNAKE_CASE : Any = proj_init_std
__SCREAMING_SNAKE_CASE : List[str] = init_std
__SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__( self :str ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 696 | 1 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__SCREAMING_SNAKE_CASE : str = True
for i in range(lowercase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
if a[i].islower():
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert'''
def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : Dict = position_embedding_type
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
| 696 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
__lowerCAmelCase : Optional[int] ='https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowerCAmelCase : List[str] =BASE_URL + '/user'
# https://github.com/settings/tokens
__lowerCAmelCase : int =os.environ.get('USER_TOKEN', '')
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = {
'''Authorization''': F'''token {auth_token}''',
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(lowercase__ , headers=lowercase__ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 696 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tokenizers''' )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' )
__SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' )
__SCREAMING_SNAKE_CASE : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' )
__SCREAMING_SNAKE_CASE : List[str] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCAmelCase__ )
self.assertIn('''tensorflow_text''' , lowerCAmelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__SCREAMING_SNAKE_CASE : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
| 696 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=lowercase__ )
env_command_parser(subparsers=lowercase__ )
launch_command_parser(subparsers=lowercase__ )
tpu_command_parser(subparsers=lowercase__ )
test_command_parser(subparsers=lowercase__ )
# Let's go
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
if not hasattr(lowercase__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(lowercase__ )
if __name__ == "__main__":
main()
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase ( lowercase__ ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(lowercase__ , 0 , lowercase__ , args=(lowercase__) )[0]
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return math.pow(lowercase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( A__ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __magic_name__( lowerCAmelCase__ :ArgumentParser ) -> Tuple:
raise NotImplementedError()
@abstractmethod
def __magic_name__( self :int ) -> int:
raise NotImplementedError()
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
__SCREAMING_SNAKE_CASE : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__SCREAMING_SNAKE_CASE : Union[str, Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__lowerCAmelCase : Tuple ={
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowercase__ ) , version.parse(lowercase__ ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def _UpperCamelCase ( lowercase__ , lowercase__ = None ):
__SCREAMING_SNAKE_CASE : int = F'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''' , lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = requirement, None, None
else:
__SCREAMING_SNAKE_CASE : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowercase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
F''' got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = match[0]
__SCREAMING_SNAKE_CASE : str = want_full.split(''',''' ) # there could be multiple requirements
__SCREAMING_SNAKE_CASE : Tuple = {}
for w in want_range:
__SCREAMING_SNAKE_CASE : Dict = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , lowercase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
F''' but got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = match[0]
__SCREAMING_SNAKE_CASE : List[Any] = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__SCREAMING_SNAKE_CASE : str = '''.'''.join([str(lowercase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return
# check if any version is installed
try:
__SCREAMING_SNAKE_CASE : Dict = importlib.metadata.version(lowercase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowercase__ , lowercase__ )
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__=1e-12 ):
__SCREAMING_SNAKE_CASE : Tuple = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(lowercase__ , axis=1 ) , a_min=lowercase__ ) ).T
__SCREAMING_SNAKE_CASE : List[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(lowercase__ , axis=1 ) , a_min=lowercase__ ) ).T
return jnp.matmul(lowercase__ , norm_emb_a.T )
class _lowercase ( nn.Module ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : CLIPConfig
SCREAMING_SNAKE_CASE__ : jnp.dtype = jnp.floataa
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = FlaxCLIPVisionModule(self.config.vision_config )
__SCREAMING_SNAKE_CASE : Any = nn.Dense(self.config.projection_dim , use_bias=lowerCAmelCase__ , dtype=self.dtype )
__SCREAMING_SNAKE_CASE : List[str] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__SCREAMING_SNAKE_CASE : Any = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__SCREAMING_SNAKE_CASE : Dict = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self :Optional[Any] , lowerCAmelCase__ :Dict ) -> int:
__SCREAMING_SNAKE_CASE : str = self.vision_model(lowerCAmelCase__ )[1]
__SCREAMING_SNAKE_CASE : List[str] = self.visual_projection(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = jax_cosine_distance(lowerCAmelCase__ , self.special_care_embeds )
__SCREAMING_SNAKE_CASE : Dict = jax_cosine_distance(lowerCAmelCase__ , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0
__SCREAMING_SNAKE_CASE : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__SCREAMING_SNAKE_CASE : Dict = jnp.round(lowerCAmelCase__ , 3 )
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowerCAmelCase__ )
# Use a lower threshold if an image has any special care concept
__SCREAMING_SNAKE_CASE : Optional[int] = is_special_care * 0.01
__SCREAMING_SNAKE_CASE : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__SCREAMING_SNAKE_CASE : int = jnp.round(lowerCAmelCase__ , 3 )
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPConfig
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''clip_input'''
SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionSafetyCheckerModule
def __init__( self :List[str] , lowerCAmelCase__ :CLIPConfig , lowerCAmelCase__ :Optional[Tuple] = None , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :jnp.dtype = jnp.floataa , lowerCAmelCase__ :bool = True , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
if input_shape is None:
__SCREAMING_SNAKE_CASE : Any = (1, 224, 224, 3)
__SCREAMING_SNAKE_CASE : Dict = self.module_class(config=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **lowerCAmelCase__ )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , input_shape=lowerCAmelCase__ , seed=lowerCAmelCase__ , dtype=lowerCAmelCase__ , _do_init=_do_init )
def __magic_name__( self :Any , lowerCAmelCase__ :jax.random.KeyArray , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :FrozenDict = None ) -> FrozenDict:
# init input tensor
__SCREAMING_SNAKE_CASE : List[Any] = jax.random.normal(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = jax.random.split(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''params''': params_rng, '''dropout''': dropout_rng}
__SCREAMING_SNAKE_CASE : List[Any] = self.module.init(lowerCAmelCase__ , lowerCAmelCase__ )['''params''']
return random_params
def __call__( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :dict = None , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(lowerCAmelCase__ , dtype=jnp.floataa ) , rngs={} , )
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE : List[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Any = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__SCREAMING_SNAKE_CASE : str = {'''unk_token''': '''<unk>'''}
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : str = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , **lowerCAmelCase__ :Optional[Any] ) -> Tuple:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , **lowerCAmelCase__ :Union[str, Any] ) -> Union[str, Any]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Dict , **lowerCAmelCase__ :Union[str, Any] ) -> Union[str, Any]:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :int ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : Dict = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Dict = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def __magic_name__( self :str ) -> Any:
__SCREAMING_SNAKE_CASE : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
__SCREAMING_SNAKE_CASE : Any = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = image_processor(lowerCAmelCase__ , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE : Optional[int] = processor(images=lowerCAmelCase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__( self :str ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = '''lower newer'''
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : int = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''lower newer'''
__SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Tuple = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __magic_name__( self :Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : int = processor.batch_decode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[int] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = '''lower newer'''
__SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 1 |
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Dict ='\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : str ='\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[Any] ='\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Dict ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :str=None , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Optional[int]=False , ) -> Optional[int]:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__SCREAMING_SNAKE_CASE : Tuple = np.array([re.sub(lowerCAmelCase__ , '''''' , lowerCAmelCase__ ) for x in predictions] )
__SCREAMING_SNAKE_CASE : Dict = np.array([re.sub(lowerCAmelCase__ , '''''' , lowerCAmelCase__ ) for x in references] )
else:
__SCREAMING_SNAKE_CASE : Any = np.asarray(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = np.asarray(lowerCAmelCase__ )
if ignore_case:
__SCREAMING_SNAKE_CASE : Any = np.char.lower(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.char.lower(lowerCAmelCase__ )
if ignore_punctuation:
__SCREAMING_SNAKE_CASE : str = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
__SCREAMING_SNAKE_CASE : List[str] = np.char.translate(lowerCAmelCase__ , table=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = np.char.translate(lowerCAmelCase__ , table=lowerCAmelCase__ )
if ignore_numbers:
__SCREAMING_SNAKE_CASE : Union[str, Any] = string.digits.maketrans('''''' , '''''' , string.digits )
__SCREAMING_SNAKE_CASE : str = np.char.translate(lowerCAmelCase__ , table=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = np.char.translate(lowerCAmelCase__ , table=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = predictions == references
return {"exact_match": np.mean(lowerCAmelCase__ ) * 100}
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase : List[str] ={
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] =[
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple =[
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 1 |
import numpy
# List of input, output pairs
__lowerCAmelCase : Optional[int] =(
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
__lowerCAmelCase : List[Any] =(((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
__lowerCAmelCase : Union[str, Any] =[2, 4, 1, 5]
__lowerCAmelCase : List[str] =len(train_data)
__lowerCAmelCase : Union[str, Any] =0.0_0_9
def _UpperCamelCase ( lowercase__ , lowercase__="train" ):
return calculate_hypothesis_value(lowercase__ , lowercase__ ) - output(
lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = 0
for i in range(len(lowercase__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _UpperCamelCase ( lowercase__ , lowercase__ ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _UpperCamelCase ( lowercase__ , lowercase__=m ):
__SCREAMING_SNAKE_CASE : List[str] = 0
for i in range(lowercase__ ):
if index == -1:
summation_value += _error(lowercase__ )
else:
summation_value += _error(lowercase__ ) * train_data[i][0][index]
return summation_value
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = summation_of_cost_derivative(lowercase__ , lowercase__ ) / m
return cost_derivative_value
def _UpperCamelCase ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__SCREAMING_SNAKE_CASE : str = 0.00_0002
__SCREAMING_SNAKE_CASE : Tuple = 0
__SCREAMING_SNAKE_CASE : str = 0
while True:
j += 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 0, 0, 0]
for i in range(0 , len(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : List[Any] = get_cost_derivative(i - 1 )
__SCREAMING_SNAKE_CASE : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowercase__ , lowercase__ , atol=lowercase__ , rtol=lowercase__ , ):
break
__SCREAMING_SNAKE_CASE : Any = temp_parameter_vector
print(('''Number of iterations:''', j) )
def _UpperCamelCase ( ):
for i in range(len(lowercase__ ) ):
print(('''Actual output value:''', output(lowercase__ , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(lowercase__ , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 696 |
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase : Any ={'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple =['DPTFeatureExtractor']
__lowerCAmelCase : List[Any] =['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Callable , lowerCAmelCase__ :Optional[Features] = None , lowerCAmelCase__ :str = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[dict] = None , lowerCAmelCase__ :Optional[int] = None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
super().__init__(
features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : str = Generator(
cache_dir=lowerCAmelCase__ , features=lowerCAmelCase__ , generator=lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __magic_name__( self :List[Any] ) -> Optional[int]:
# Build iterable dataset
if self.streaming:
__SCREAMING_SNAKE_CASE : Any = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None
def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
| 696 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
__lowerCAmelCase : Optional[int] =(7_2_0, 1_2_8_0) # Height, Width
__lowerCAmelCase : Tuple =(0.4, 0.6) # if height or width lower than this scale, drop it.
__lowerCAmelCase : Optional[int] =1 / 1_0_0
__lowerCAmelCase : Any =''
__lowerCAmelCase : List[str] =''
__lowerCAmelCase : List[Any] =''
__lowerCAmelCase : Tuple =2_5_0
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataset(lowercase__ , lowercase__ )
for index in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = random.sample(range(len(lowercase__ ) ) , 4 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = update_image_and_anno(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , filter_scale=lowercase__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__SCREAMING_SNAKE_CASE : str = random_chars(32 )
__SCREAMING_SNAKE_CASE : List[str] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__SCREAMING_SNAKE_CASE : Optional[int] = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , lowercase__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for anno in new_annos:
__SCREAMING_SNAKE_CASE : str = anno[3] - anno[1]
__SCREAMING_SNAKE_CASE : str = anno[4] - anno[2]
__SCREAMING_SNAKE_CASE : List[str] = anno[1] + width / 2
__SCREAMING_SNAKE_CASE : str = anno[2] + height / 2
__SCREAMING_SNAKE_CASE : List[str] = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(lowercase__ )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : List[Any] = []
for label_file in glob.glob(os.path.join(lowercase__ , '''*.txt''' ) ):
__SCREAMING_SNAKE_CASE : Any = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(lowercase__ ) as in_file:
__SCREAMING_SNAKE_CASE : Union[str, Any] = in_file.readlines()
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(lowercase__ , F'''{label_name}.jpg''' )
__SCREAMING_SNAKE_CASE : Tuple = []
for obj_list in obj_lists:
__SCREAMING_SNAKE_CASE : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
__SCREAMING_SNAKE_CASE : Tuple = float(obj[1] ) - float(obj[3] ) / 2
__SCREAMING_SNAKE_CASE : Optional[Any] = float(obj[2] ) - float(obj[4] ) / 2
__SCREAMING_SNAKE_CASE : Optional[Any] = float(obj[1] ) + float(obj[3] ) / 2
__SCREAMING_SNAKE_CASE : List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase__ )
labels.append(lowercase__ )
return img_paths, labels
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 0.0 , ):
__SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__SCREAMING_SNAKE_CASE : Optional[int] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__SCREAMING_SNAKE_CASE : Any = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__SCREAMING_SNAKE_CASE : Tuple = int(scale_x * output_size[1] )
__SCREAMING_SNAKE_CASE : Dict = int(scale_y * output_size[0] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : str = []
for i, index in enumerate(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = all_img_list[index]
path_list.append(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = all_annos[index]
__SCREAMING_SNAKE_CASE : int = cva.imread(lowercase__ )
if i == 0: # top-left
__SCREAMING_SNAKE_CASE : Union[str, Any] = cva.resize(lowercase__ , (divid_point_x, divid_point_y) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = img
for bbox in img_annos:
__SCREAMING_SNAKE_CASE : Union[str, Any] = bbox[1] * scale_x
__SCREAMING_SNAKE_CASE : List[Any] = bbox[2] * scale_y
__SCREAMING_SNAKE_CASE : Any = bbox[3] * scale_x
__SCREAMING_SNAKE_CASE : Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__SCREAMING_SNAKE_CASE : Tuple = cva.resize(lowercase__ , (output_size[1] - divid_point_x, divid_point_y) )
__SCREAMING_SNAKE_CASE : List[Any] = img
for bbox in img_annos:
__SCREAMING_SNAKE_CASE : Any = scale_x + bbox[1] * (1 - scale_x)
__SCREAMING_SNAKE_CASE : Union[str, Any] = bbox[2] * scale_y
__SCREAMING_SNAKE_CASE : List[Any] = scale_x + bbox[3] * (1 - scale_x)
__SCREAMING_SNAKE_CASE : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__SCREAMING_SNAKE_CASE : str = cva.resize(lowercase__ , (divid_point_x, output_size[0] - divid_point_y) )
__SCREAMING_SNAKE_CASE : int = img
for bbox in img_annos:
__SCREAMING_SNAKE_CASE : Optional[int] = bbox[1] * scale_x
__SCREAMING_SNAKE_CASE : Any = scale_y + bbox[2] * (1 - scale_y)
__SCREAMING_SNAKE_CASE : Tuple = bbox[3] * scale_x
__SCREAMING_SNAKE_CASE : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__SCREAMING_SNAKE_CASE : Dict = cva.resize(
lowercase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__SCREAMING_SNAKE_CASE : List[str] = img
for bbox in img_annos:
__SCREAMING_SNAKE_CASE : List[Any] = scale_x + bbox[1] * (1 - scale_x)
__SCREAMING_SNAKE_CASE : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
__SCREAMING_SNAKE_CASE : int = scale_x + bbox[3] * (1 - scale_x)
__SCREAMING_SNAKE_CASE : Optional[Any] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
__SCREAMING_SNAKE_CASE : str = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _UpperCamelCase ( lowercase__ ):
assert number_char > 1, "The number of character should greater than 1"
__SCREAMING_SNAKE_CASE : int = ascii_lowercase + digits
return "".join(random.choice(lowercase__ ) for _ in range(lowercase__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : str = "text"
SCREAMING_SNAKE_CASE__ : str = "summary"
@property
def __magic_name__( self :Union[str, Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[Any] ={
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
def _UpperCamelCase ( lowercase__ = 10**9 ):
__SCREAMING_SNAKE_CASE : List[str] = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Dict = 0
__SCREAMING_SNAKE_CASE : Any = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__SCREAMING_SNAKE_CASE : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
import numpy as np
from transformers import Pipeline
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = np.max(lowercase__ , axis=-1 , keepdims=lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase__ )
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :Tuple , **lowerCAmelCase__ :Any ) -> str:
__SCREAMING_SNAKE_CASE : str = {}
if "second_text" in kwargs:
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''second_text''']
return preprocess_kwargs, {}, {}
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any=None ) -> str:
return self.tokenizer(lowerCAmelCase__ , text_pair=lowerCAmelCase__ , return_tensors=self.framework )
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[str] ) -> List[Any]:
return self.model(**lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE : int = model_outputs.logits[0].numpy()
__SCREAMING_SNAKE_CASE : Tuple = softmax(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = np.argmax(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.model.config.idalabel[best_class]
__SCREAMING_SNAKE_CASE : List[Any] = probabilities[best_class].item()
__SCREAMING_SNAKE_CASE : str = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__SCREAMING_SNAKE_CASE : str = True
for i in range(lowercase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
if a[i].islower():
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None
def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
| 696 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int:
if return_pvalue:
__SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
| 696 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''canine'''
def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : int = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
# Character config:
__SCREAMING_SNAKE_CASE : Tuple = downsampling_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size
__SCREAMING_SNAKE_CASE : Any = num_hash_functions
__SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets
__SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 696 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
class _lowercase ( A__ , A__ ):
'''simple docstring'''
@register_to_config
def __init__( self :int , lowerCAmelCase__ :int = 65_536 , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 2 , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "fourier" , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase__ :Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase__ :Tuple[str] = "UNetMidBlock1D" , lowerCAmelCase__ :str = None , lowerCAmelCase__ :Tuple[int] = (32, 32, 64) , lowerCAmelCase__ :str = None , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :bool = False , ) -> int:
super().__init__()
__SCREAMING_SNAKE_CASE : int = sample_size
# time
if time_embedding_type == "fourier":
__SCREAMING_SNAKE_CASE : Any = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase__ , log=lowerCAmelCase__ , flip_sin_to_cos=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__SCREAMING_SNAKE_CASE : str = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase__ , downscale_freq_shift=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = block_out_channels[0]
if use_timestep_embedding:
__SCREAMING_SNAKE_CASE : str = block_out_channels[0] * 4
__SCREAMING_SNAKE_CASE : Any = TimestepEmbedding(
in_channels=lowerCAmelCase__ , time_embed_dim=lowerCAmelCase__ , act_fn=lowerCAmelCase__ , out_dim=block_out_channels[0] , )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : int = nn.ModuleList([] )
__SCREAMING_SNAKE_CASE : List[Any] = None
# down
__SCREAMING_SNAKE_CASE : Any = in_channels
for i, down_block_type in enumerate(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = output_channel
__SCREAMING_SNAKE_CASE : Union[str, Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__SCREAMING_SNAKE_CASE : Dict = i == len(lowerCAmelCase__ ) - 1
__SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCAmelCase__ , num_layers=lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase__ )
# mid
__SCREAMING_SNAKE_CASE : List[Any] = get_mid_block(
lowerCAmelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase__ , add_downsample=lowerCAmelCase__ , )
# up
__SCREAMING_SNAKE_CASE : List[Any] = list(reversed(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = reversed_block_out_channels[0]
if out_block_type is None:
__SCREAMING_SNAKE_CASE : Optional[int] = out_channels
else:
__SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Tuple = output_channel
__SCREAMING_SNAKE_CASE : Optional[Any] = (
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase__ ) - 1 else final_upsample_channels
)
__SCREAMING_SNAKE_CASE : int = i == len(lowerCAmelCase__ ) - 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_up_block(
lowerCAmelCase__ , num_layers=lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = output_channel
# out
__SCREAMING_SNAKE_CASE : Optional[int] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
__SCREAMING_SNAKE_CASE : List[Any] = get_out_block(
out_block_type=lowerCAmelCase__ , num_groups_out=lowerCAmelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase__ , act_fn=lowerCAmelCase__ , fc_dim=block_out_channels[-1] // 4 , )
def __magic_name__( self :str , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Union[torch.Tensor, float, int] , lowerCAmelCase__ :bool = True , ) -> Union[UNetaDOutput, Tuple]:
__SCREAMING_SNAKE_CASE : int = timestep
if not torch.is_tensor(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCAmelCase__ ) and len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE : str = timesteps[None].to(sample.device )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_proj(lowerCAmelCase__ )
if self.config.use_timestep_embedding:
__SCREAMING_SNAKE_CASE : List[str] = self.time_mlp(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : List[Any] = timestep_embed[..., None]
__SCREAMING_SNAKE_CASE : Optional[int] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__SCREAMING_SNAKE_CASE : int = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__SCREAMING_SNAKE_CASE : Any = ()
for downsample_block in self.down_blocks:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = downsample_block(hidden_states=lowerCAmelCase__ , temb=lowerCAmelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = down_block_res_samples[-1:]
__SCREAMING_SNAKE_CASE : Any = down_block_res_samples[:-1]
__SCREAMING_SNAKE_CASE : Dict = upsample_block(lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , temb=lowerCAmelCase__ )
# 5. post-process
if self.out_block:
__SCREAMING_SNAKE_CASE : Dict = self.out_block(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase__ )
| 696 |
import os
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = len(grid[0] )
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Dict = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(lowercase__ ):
for j in range(n_rows - 3 ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__SCREAMING_SNAKE_CASE : Optional[int] = max(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if max_product > largest:
__SCREAMING_SNAKE_CASE : Tuple = max_product
return largest
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[int] = []
with open(os.path.dirname(lowercase__ ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
__SCREAMING_SNAKE_CASE : str = [[int(lowercase__ ) for i in grid[j]] for j in range(len(lowercase__ ) )]
return largest_product(lowercase__ )
if __name__ == "__main__":
print(solution())
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase : Union[str, Any] ={
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 696 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int]=1_024 , lowerCAmelCase__ :str=1_024 , lowerCAmelCase__ :Any=3.6 ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.bos_token_id
__SCREAMING_SNAKE_CASE : int = dataset
__SCREAMING_SNAKE_CASE : Optional[Any] = seq_length
__SCREAMING_SNAKE_CASE : int = seq_length * chars_per_token * num_of_sequences
def __iter__( self :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = iter(self.dataset )
__SCREAMING_SNAKE_CASE : Optional[int] = True
while more_examples:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowerCAmelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
break
__SCREAMING_SNAKE_CASE : int = tokenizer(lowerCAmelCase__ , truncation=lowerCAmelCase__ )['''input_ids''']
__SCREAMING_SNAKE_CASE : Dict = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowerCAmelCase__ ) , self.seq_length ):
__SCREAMING_SNAKE_CASE : Dict = all_token_ids[i : i + self.seq_length]
if len(lowerCAmelCase__ ) == self.seq_length:
yield torch.tensor(lowerCAmelCase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = {'''streaming''': True}
__SCREAMING_SNAKE_CASE : Dict = load_dataset(args.dataset_name , split='''train''' , **lowercase__ )
__SCREAMING_SNAKE_CASE : str = ConstantLengthDataset(lowercase__ , lowercase__ , seq_length=args.seq_length )
__SCREAMING_SNAKE_CASE : Tuple = DataLoader(lowercase__ , batch_size=args.batch_size )
return eval_dataloader
def _UpperCamelCase ( lowercase__ ):
model.eval()
__SCREAMING_SNAKE_CASE : int = []
for step, batch in enumerate(lowercase__ ):
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowercase__ , labels=lowercase__ )
__SCREAMING_SNAKE_CASE : str = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowercase__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
__SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.cat(lowercase__ ) )
try:
__SCREAMING_SNAKE_CASE : int = torch.exp(lowercase__ )
except OverflowError:
__SCREAMING_SNAKE_CASE : Tuple = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
__lowerCAmelCase : str =Accelerator()
# Parse configuration
__lowerCAmelCase : Union[str, Any] =HfArgumentParser(EvaluationArguments)
__lowerCAmelCase : List[str] =parser.parse_args()
set_seed(args.seed)
# Logging
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
__lowerCAmelCase : Optional[int] =AutoModelForCausalLM.from_pretrained(args.model_ckpt)
__lowerCAmelCase : Dict =AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
__lowerCAmelCase : List[Any] =create_dataloader(args)
# Prepare everything with our `accelerator`.
__lowerCAmelCase ,__lowerCAmelCase : Union[str, Any] =accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
__lowerCAmelCase ,__lowerCAmelCase : Optional[Any] =evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
__SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 696 | 1 |
from __future__ import annotations
__lowerCAmelCase : Any ='#'
class _lowercase :
'''simple docstring'''
def __init__( self :List[Any] ) -> None:
__SCREAMING_SNAKE_CASE : dict = {}
def __magic_name__( self :List[str] , lowerCAmelCase__ :str ) -> None:
__SCREAMING_SNAKE_CASE : int = self._trie
for char in text:
if char not in trie:
__SCREAMING_SNAKE_CASE : Optional[int] = {}
__SCREAMING_SNAKE_CASE : List[Any] = trie[char]
__SCREAMING_SNAKE_CASE : Optional[Any] = True
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str ) -> tuple | list:
__SCREAMING_SNAKE_CASE : Dict = self._trie
for char in prefix:
if char in trie:
__SCREAMING_SNAKE_CASE : Dict = trie[char]
else:
return []
return self._elements(lowerCAmelCase__ )
def __magic_name__( self :str , lowerCAmelCase__ :dict ) -> tuple:
__SCREAMING_SNAKE_CASE : List[str] = []
for c, v in d.items():
__SCREAMING_SNAKE_CASE : Dict = [''' '''] if c == END else [(c + s) for s in self._elements(lowerCAmelCase__ )]
result.extend(lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
__lowerCAmelCase : Tuple =Trie()
__lowerCAmelCase : Union[str, Any] =('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = trie.find_word(lowercase__ )
return tuple(string + word for word in suffixes )
def _UpperCamelCase ( ):
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''canine'''
def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = initializer_range
__SCREAMING_SNAKE_CASE : int = type_vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
# Character config:
__SCREAMING_SNAKE_CASE : Tuple = downsampling_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size
__SCREAMING_SNAKE_CASE : Any = num_hash_functions
__SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets
__SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
| 696 | 1 |
from ....utils import logging
__lowerCAmelCase : Any =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Any=2_048 ) -> List[Any]:
__SCREAMING_SNAKE_CASE : int = config.__dict__
__SCREAMING_SNAKE_CASE : Dict = modal_hidden_size
if num_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''mems''']
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str:
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : Tuple = []
self.cutoffs.extend(lowerCAmelCase__ )
if proj_share_all_but_first:
__SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs )
else:
__SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs )
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed
__SCREAMING_SNAKE_CASE : Tuple = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Optional[Any] = div_val
__SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm
__SCREAMING_SNAKE_CASE : List[str] = n_layer
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : str = mem_len
__SCREAMING_SNAKE_CASE : Union[str, Any] = same_length
__SCREAMING_SNAKE_CASE : str = attn_type
__SCREAMING_SNAKE_CASE : Dict = clamp_len
__SCREAMING_SNAKE_CASE : Tuple = sample_softmax
__SCREAMING_SNAKE_CASE : Optional[int] = adaptive
__SCREAMING_SNAKE_CASE : int = dropout
__SCREAMING_SNAKE_CASE : Optional[Any] = dropatt
__SCREAMING_SNAKE_CASE : int = untie_r
__SCREAMING_SNAKE_CASE : Optional[int] = init
__SCREAMING_SNAKE_CASE : List[str] = init_range
__SCREAMING_SNAKE_CASE : Any = proj_init_std
__SCREAMING_SNAKE_CASE : List[str] = init_std
__SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __magic_name__( self :str ) -> int:
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 696 | 1 |
__lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert'''
def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Any = intermediate_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
__SCREAMING_SNAKE_CASE : str = initializer_range
__SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
__SCREAMING_SNAKE_CASE : Dict = position_embedding_type
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
| 696 | 1 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
__lowerCAmelCase : List[Any] =getLogger(__name__)
__lowerCAmelCase : List[str] ='cuda' if torch.cuda.is_available() else 'cpu'
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 8 , lowercase__ = DEFAULT_DEVICE , lowercase__=False , lowercase__="summarization" , lowercase__=None , **lowercase__ , ):
__SCREAMING_SNAKE_CASE : Any = Path(lowercase__ ).open('''w''' , encoding='''utf-8''' )
__SCREAMING_SNAKE_CASE : Dict = str(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ ).to(lowercase__ )
if fpaa:
__SCREAMING_SNAKE_CASE : Optional[Any] = model.half()
__SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(lowercase__ )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
__SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
# update config with task specific params
use_task_specific_params(lowercase__ , lowercase__ )
if prefix is None:
__SCREAMING_SNAKE_CASE : Optional[int] = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(lowercase__ , lowercase__ ) ) ):
__SCREAMING_SNAKE_CASE : Tuple = [prefix + text for text in examples_chunk]
__SCREAMING_SNAKE_CASE : int = tokenizer(lowercase__ , return_tensors='''pt''' , truncation=lowercase__ , padding='''longest''' ).to(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **lowercase__ , )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__SCREAMING_SNAKE_CASE : Optional[Any] = int(time.time() - start_time ) # seconds
__SCREAMING_SNAKE_CASE : List[str] = len(lowercase__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _UpperCamelCase ( ):
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _UpperCamelCase ( lowercase__=True ):
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=lowercase__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=lowercase__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=lowercase__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=lowercase__ , required=lowercase__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=lowercase__ , required=lowercase__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=lowercase__ , required=lowercase__ , default=lowercase__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=lowercase__ , required=lowercase__ , default=lowercase__ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=lowercase__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=lowercase__ , default=8 , required=lowercase__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=lowercase__ , default=-1 , required=lowercase__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=lowercase__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = parser.parse_known_args()
__SCREAMING_SNAKE_CASE : Union[str, Any] = parse_numeric_n_bool_cl_kwargs(lowercase__ )
if parsed_args and verbose:
print(F'''parsed the following generate kwargs: {parsed_args}''' )
__SCREAMING_SNAKE_CASE : int = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__SCREAMING_SNAKE_CASE : List[Any] = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=lowercase__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__SCREAMING_SNAKE_CASE : List[str] = generate_summaries_or_translations(
lowercase__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **lowercase__ , )
if args.reference_path is None:
return {}
# Compute scores
__SCREAMING_SNAKE_CASE : Any = calculate_bleu if '''translation''' in args.task else calculate_rouge
__SCREAMING_SNAKE_CASE : Tuple = [x.rstrip() for x in open(args.save_path ).readlines()]
__SCREAMING_SNAKE_CASE : Dict = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(lowercase__ )]
__SCREAMING_SNAKE_CASE : dict = score_fn(lowercase__ , lowercase__ )
scores.update(lowercase__ )
if args.dump_args:
scores.update(lowercase__ )
if args.info:
__SCREAMING_SNAKE_CASE : List[Any] = args.info
if verbose:
print(lowercase__ )
if args.score_path is not None:
json.dump(lowercase__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 696 |
import os
import sys
import unittest
__lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers')
__lowerCAmelCase : Optional[Any] ='\n{0} = None\n'
__lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
__lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tokenizers''' )
__SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' )
__SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' )
__SCREAMING_SNAKE_CASE : Any = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' )
__SCREAMING_SNAKE_CASE : List[str] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCAmelCase__ )
self.assertIn('''tensorflow_text''' , lowerCAmelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' )
__SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__SCREAMING_SNAKE_CASE : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
| 696 | 1 |
from bisect import bisect
from itertools import accumulate
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = sorted(zip(lowercase__ , lowercase__ ) , key=lambda lowercase__ : x[0] / x[1] , reverse=lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = [i[0] for i in r], [i[1] for i in r]
__SCREAMING_SNAKE_CASE : str = list(accumulate(lowercase__ ) )
__SCREAMING_SNAKE_CASE : List[Any] = bisect(lowercase__ , lowercase__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
import math
from numpy import inf
from scipy.integrate import quad
def _UpperCamelCase ( lowercase__ ):
if num <= 0:
raise ValueError('''math domain error''' )
return quad(lowercase__ , 0 , lowercase__ , args=(lowercase__) )[0]
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return math.pow(lowercase__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 | 1 |
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = int(np.ceil((x_end - xa) / step_size ) )
__SCREAMING_SNAKE_CASE : List[Any] = np.zeros((n + 1,) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ya
__SCREAMING_SNAKE_CASE : str = xa
for k in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = y[k] + step_size * ode_func(lowercase__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
__SCREAMING_SNAKE_CASE : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__SCREAMING_SNAKE_CASE : Union[str, Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
__SCREAMING_SNAKE_CASE : Dict = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(lowercase__ ):
os.makedirs(lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = model.state_dict()
def to_tf_var_name(lowercase__ ):
for patt, repl in iter(lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = name.replace(lowercase__ , lowercase__ )
return F'''bert/{name}'''
def create_tf_var(lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
__SCREAMING_SNAKE_CASE : Any = tf.get_variable(dtype=lowercase__ , shape=tensor.shape , name=lowercase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowercase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__SCREAMING_SNAKE_CASE : int = to_tf_var_name(lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__SCREAMING_SNAKE_CASE : Optional[int] = torch_tensor.T
__SCREAMING_SNAKE_CASE : Optional[Any] = create_tf_var(tensor=lowercase__ , name=lowercase__ , session=lowercase__ )
tf.keras.backend.set_value(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = session.run(lowercase__ )
print(F'''Successfully created {tf_name}: {np.allclose(lowercase__ , lowercase__ )}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(lowercase__ , os.path.join(lowercase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def _UpperCamelCase ( lowercase__=None ):
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowercase__ , required=lowercase__ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=lowercase__ , default=lowercase__ , required=lowercase__ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=lowercase__ , required=lowercase__ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=lowercase__ , required=lowercase__ , help='''Directory in which to save tensorflow model''' )
__SCREAMING_SNAKE_CASE : str = parser.parse_args(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowercase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 696 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase : List[Any] ={'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCAmelCase : int ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __magic_name__( cls :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def __magic_name__( cls :List[str] ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __magic_name__( self :Dict ) -> Optional[int]:
CustomConfig.register_for_auto_class()
__SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int
__SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float
__SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool
__SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = PretrainedConfig()
__SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase__ )}.''' )
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaises(lowerCAmelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
__SCREAMING_SNAKE_CASE : List[Any] = 500
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
__SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
__SCREAMING_SNAKE_CASE : str = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
__SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def __magic_name__( self :Union[str, Any] ) -> List[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __magic_name__( self :str ) -> List[str]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
__SCREAMING_SNAKE_CASE : Tuple = 768
configuration.save_pretrained(lowerCAmelCase__ )
shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __magic_name__( self :List[str] ) -> Union[str, Any]:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
__SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[Any] ={
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] =[
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Dict =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = botoa.client('''iam''' )
__SCREAMING_SNAKE_CASE : Tuple = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowercase__ , AssumeRolePolicyDocument=json.dumps(lowercase__ , indent=2 ) )
__SCREAMING_SNAKE_CASE : str = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowercase__ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(lowercase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = botoa.client('''iam''' )
return iam_client.get_role(RoleName=lowercase__ )["Role"]["Arn"]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , lowercase__ , )
__SCREAMING_SNAKE_CASE : str = None
if credentials_configuration == 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
__SCREAMING_SNAKE_CASE : Dict = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field('''AWS Access Key ID: ''' )
__SCREAMING_SNAKE_CASE : Tuple = aws_access_key_id
__SCREAMING_SNAKE_CASE : str = _ask_field('''AWS Secret Access Key: ''' )
__SCREAMING_SNAKE_CASE : List[str] = aws_secret_access_key
__SCREAMING_SNAKE_CASE : Tuple = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
__SCREAMING_SNAKE_CASE : List[Any] = aws_region
__SCREAMING_SNAKE_CASE : List[str] = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , lowercase__ , )
if role_management == 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field('''Enter your IAM role name: ''' )
else:
__SCREAMING_SNAKE_CASE : Tuple = '''accelerate_sagemaker_execution_role'''
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : List[str] = None
if is_custom_docker_image:
__SCREAMING_SNAKE_CASE : Dict = _ask_field('''Enter your Docker image: ''' , lambda lowercase__ : str(lowercase__ ).lower() )
__SCREAMING_SNAKE_CASE : Any = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : Dict = None
if is_sagemaker_inputs_enabled:
__SCREAMING_SNAKE_CASE : Optional[int] = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda lowercase__ : str(lowercase__ ).lower() , )
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : Any = None
if is_sagemaker_metrics_enabled:
__SCREAMING_SNAKE_CASE : Dict = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda lowercase__ : str(lowercase__ ).lower() , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
__SCREAMING_SNAKE_CASE : List[str] = {}
__SCREAMING_SNAKE_CASE : int = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''dynamo_'''
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__SCREAMING_SNAKE_CASE : List[str] = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
__SCREAMING_SNAKE_CASE : Any = _ask_options(
'''Which mode do you want to use?''' , lowercase__ , lambda lowercase__ : TORCH_DYNAMO_MODES[int(lowercase__ )] , default='''default''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : str = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : Any = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
__SCREAMING_SNAKE_CASE : List[str] = _ask_options(
lowercase__ , lowercase__ , lambda lowercase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowercase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(lowercase__ , lambda lowercase__ : str(lowercase__ ).lower() , default='''ml.p3.2xlarge''' )
__SCREAMING_SNAKE_CASE : int = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field(
'''How many machines do you want use? [1]: ''' , lowercase__ , default=1 , )
__SCREAMING_SNAKE_CASE : List[str] = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=lowercase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowercase__ , use_cpu=lowercase__ , dynamo_config=lowercase__ , eca_instance_type=lowercase__ , profile=lowercase__ , region=lowercase__ , iam_role_name=lowercase__ , mixed_precision=lowercase__ , num_machines=lowercase__ , sagemaker_inputs_file=lowercase__ , sagemaker_metrics_file=lowercase__ , )
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 696 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionXLImgaImgPipeline
SCREAMING_SNAKE_CASE__ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
SCREAMING_SNAKE_CASE__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :Dict ) -> Tuple:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase__ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__SCREAMING_SNAKE_CASE : Tuple = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=32 , )
__SCREAMING_SNAKE_CASE : Tuple = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __magic_name__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=0 ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = image / 2 + 0.5
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : int = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def __magic_name__( self :Tuple ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : int = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__( self :int ) -> Any:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
pass
def __magic_name__( self :List[str] ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# forward without prompt embeds
__SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = 3 * ['''this is a negative prompt''']
__SCREAMING_SNAKE_CASE : List[Any] = negative_prompt
__SCREAMING_SNAKE_CASE : str = 3 * [inputs['''prompt''']]
__SCREAMING_SNAKE_CASE : Any = sd_pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['''this is a negative prompt''']
__SCREAMING_SNAKE_CASE : List[Any] = 3 * [inputs.pop('''prompt''' )]
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) : Any = sd_pipe.encode_prompt(lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(
**lowerCAmelCase__ , prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , pooled_prompt_embeds=lowerCAmelCase__ , negative_pooled_prompt_embeds=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Dict ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int]="cpu" , lowerCAmelCase__ :Union[str, Any]=torch.floataa , lowerCAmelCase__ :Any=0 ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 696 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict =logging.get_logger()
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = True ):
print(F'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
__SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_128s''' , pretrained=lowercase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_128''' , pretrained=lowercase__ )
if hidden_sizes == 192:
__SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_192''' , pretrained=lowercase__ )
if hidden_sizes == 256:
__SCREAMING_SNAKE_CASE : Optional[Any] = timm.create_model('''levit_256''' , pretrained=lowercase__ )
if hidden_sizes == 384:
__SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_384''' , pretrained=lowercase__ )
from_model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = LevitForImageClassificationWithTeacher(lowercase__ ).eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = OrderedDict()
__SCREAMING_SNAKE_CASE : str = from_model.state_dict()
__SCREAMING_SNAKE_CASE : List[str] = list(from_model.state_dict().keys() )
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(our_model.state_dict().keys() )
print(len(lowercase__ ) , len(lowercase__ ) )
for i in range(len(lowercase__ ) ):
__SCREAMING_SNAKE_CASE : Dict = weights[og_keys[i]]
our_model.load_state_dict(lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = torch.randn((2, 3, 224, 224) )
__SCREAMING_SNAKE_CASE : int = from_model(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = our_model(lowercase__ ).logits
assert torch.allclose(lowercase__ , lowercase__ ), "The model logits don't match the original one."
__SCREAMING_SNAKE_CASE : Tuple = name
print(lowercase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__SCREAMING_SNAKE_CASE : List[Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'''Pushed {checkpoint_name}''' )
def _UpperCamelCase ( lowercase__ , lowercase__ = None , lowercase__ = True ):
__SCREAMING_SNAKE_CASE : Tuple = '''imagenet-1k-id2label.json'''
__SCREAMING_SNAKE_CASE : Dict = 1000
__SCREAMING_SNAKE_CASE : Any = (1, num_labels)
__SCREAMING_SNAKE_CASE : List[str] = '''huggingface/label-files'''
__SCREAMING_SNAKE_CASE : int = num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Any = idalabel
__SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Optional[Any] = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
__SCREAMING_SNAKE_CASE : Tuple = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , lowercase__ , names_to_config[model_name] , lowercase__ , lowercase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCAmelCase : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCAmelCase : Tuple =parser.parse_args()
__lowerCAmelCase : Path =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 696 |
from datetime import datetime
import requests
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
__SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(lowercase__ ).content
if __name__ == "__main__":
__lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip()
__lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 696 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
def _UpperCamelCase ( lowercase__ ):
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
__SCREAMING_SNAKE_CASE : str = str(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = ''''''.join(sorted(lowercase__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def _UpperCamelCase ( lowercase__ = 99 ):
if not 0 < percent < 100:
raise ValueError('''solution() only accepts values from 0 to 100''' )
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : List[str] = 1
while True:
if check_bouncy(lowercase__ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(9_9)}""")
| 696 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None
def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
__SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
class _lowercase ( A__ , A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 1
@register_to_config
def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
__SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__SCREAMING_SNAKE_CASE : List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
__SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
__SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__SCREAMING_SNAKE_CASE : Any = 1.0
# setable values
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
__SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : List[Any] = model_output
elif self.config.prediction_type == "sample":
__SCREAMING_SNAKE_CASE : List[str] = model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __len__( self :Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
| 696 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : Union[str, Any] ={
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =['ConditionalDetrFeatureExtractor']
__lowerCAmelCase : List[str] =['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
SCREAMING_SNAKE_CASE__ : str = "text"
SCREAMING_SNAKE_CASE__ : str = "summary"
@property
def __magic_name__( self :Union[str, Any] ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 696 | 1 |
__lowerCAmelCase : List[str] ='Alexander Joslin'
import operator as op
from .stack import Stack
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
__SCREAMING_SNAKE_CASE : Stack[int] = Stack()
__SCREAMING_SNAKE_CASE : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase__ )
elif i == ")":
# RULE 4
__SCREAMING_SNAKE_CASE : List[Any] = operator_stack.peek()
operator_stack.pop()
__SCREAMING_SNAKE_CASE : Union[str, Any] = operand_stack.peek()
operand_stack.pop()
__SCREAMING_SNAKE_CASE : Any = operand_stack.peek()
operand_stack.pop()
__SCREAMING_SNAKE_CASE : int = operators[opr](lowercase__ , lowercase__ )
operand_stack.push(lowercase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__lowerCAmelCase : str ='(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 696 |
def _UpperCamelCase ( lowercase__ = 10**9 ):
__SCREAMING_SNAKE_CASE : List[str] = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Dict = 0
__SCREAMING_SNAKE_CASE : Any = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__SCREAMING_SNAKE_CASE : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 696 | 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowerCAmelCase : Dict ='pt'
elif is_tf_available():
__lowerCAmelCase : List[str] ='tf'
else:
__lowerCAmelCase : Union[str, Any] ='jax'
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = PerceiverTokenizer
SCREAMING_SNAKE_CASE__ : List[str] = False
def __magic_name__( self :Tuple ) -> Any:
super().setUp()
__SCREAMING_SNAKE_CASE : Tuple = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __magic_name__( self :str ) -> Any:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def __magic_name__( self :List[str] , **lowerCAmelCase__ :Optional[int] ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Optional[int]=20 , lowerCAmelCase__ :Union[str, Any]=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(len(lowerCAmelCase__ ) ):
try:
__SCREAMING_SNAKE_CASE : str = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(filter(lambda lowerCAmelCase__ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : int = list(filter(lambda lowerCAmelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase__ ) , lowerCAmelCase__ ) )
if max_length is not None and len(lowerCAmelCase__ ) > max_length:
__SCREAMING_SNAKE_CASE : Optional[int] = toks[:max_length]
if min_length is not None and len(lowerCAmelCase__ ) < min_length and len(lowerCAmelCase__ ) > 0:
while len(lowerCAmelCase__ ) < min_length:
__SCREAMING_SNAKE_CASE : int = toks + toks
# toks_str = [t[1] for t in toks]
__SCREAMING_SNAKE_CASE : str = [t[0] for t in toks]
# Ensure consistency
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
if " " not in output_txt and len(lowerCAmelCase__ ) > 1:
__SCREAMING_SNAKE_CASE : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase__ )
)
if with_prefix_space:
__SCREAMING_SNAKE_CASE : Dict = ''' ''' + output_txt
__SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
return output_txt, output_ids
def __magic_name__( self :Union[str, Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = self.perceiver_tokenizer
__SCREAMING_SNAKE_CASE : List[Any] = '''Unicode €.'''
__SCREAMING_SNAKE_CASE : str = tokenizer(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase__ )
# decoding
__SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''[CLS]Unicode €.[SEP]''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer('''e è é ê ë''' )
__SCREAMING_SNAKE_CASE : List[str] = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase__ )
# decoding
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def __magic_name__( self :List[str] ) -> Any:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.perceiver_tokenizer
__SCREAMING_SNAKE_CASE : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__SCREAMING_SNAKE_CASE : List[Any] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
if FRAMEWORK != "jax":
__SCREAMING_SNAKE_CASE : str = list(batch.input_ids.numpy()[0] )
else:
__SCREAMING_SNAKE_CASE : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def __magic_name__( self :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.perceiver_tokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__SCREAMING_SNAKE_CASE : Dict = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCAmelCase__ )
self.assertIn('''attention_mask''' , lowerCAmelCase__ )
self.assertNotIn('''decoder_input_ids''' , lowerCAmelCase__ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : List[Any] = self.perceiver_tokenizer
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(
text_target=lowerCAmelCase__ , max_length=32 , padding='''max_length''' , truncation=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def __magic_name__( self :Optional[Any] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__SCREAMING_SNAKE_CASE : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : str = ''' He is very happy, UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
shutil.rmtree(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : Dict = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__SCREAMING_SNAKE_CASE : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__SCREAMING_SNAKE_CASE : int = json.load(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__SCREAMING_SNAKE_CASE : Tuple = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(125 )]
__SCREAMING_SNAKE_CASE : Optional[int] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__SCREAMING_SNAKE_CASE : str = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCAmelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__SCREAMING_SNAKE_CASE : str = tokenizer_class.from_pretrained(
lowerCAmelCase__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__SCREAMING_SNAKE_CASE : str = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCAmelCase__ )]
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
pass
def __magic_name__( self :str ) -> Dict:
pass
def __magic_name__( self :List[Any] ) -> Any:
pass
def __magic_name__( self :str ) -> int:
pass
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizers(fast=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
| 696 |
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__SCREAMING_SNAKE_CASE : str = True
for i in range(lowercase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
if a[i].islower():
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCAmelCase : List[Any] =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
__lowerCAmelCase : Optional[int] =list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__lowerCAmelCase : int =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=A__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=A__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=A__ , metadata={'''help''': '''A folder containing the training data.'''} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=A__ , metadata={'''help''': '''A folder containing the validation data.'''} )
SCREAMING_SNAKE_CASE__ : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
SCREAMING_SNAKE_CASE__ : int = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
SCREAMING_SNAKE_CASE__ : float = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def __magic_name__( self :Optional[int] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : int = {}
if self.train_dir is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.train_dir
if self.validation_dir is not None:
__SCREAMING_SNAKE_CASE : Tuple = self.validation_dir
__SCREAMING_SNAKE_CASE : str = data_files if data_files else None
@dataclass
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(
default=A__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=A__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A__ )} , )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=A__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
SCREAMING_SNAKE_CASE__ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
SCREAMING_SNAKE_CASE__ : str = field(default=A__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
SCREAMING_SNAKE_CASE__ : bool = field(
default=A__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=A__ , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=A__ , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=A__ , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class _lowercase :
'''simple docstring'''
def __init__( self :Dict , lowerCAmelCase__ :str=192 , lowerCAmelCase__ :Dict=32 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Tuple=0.6 ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = input_size
__SCREAMING_SNAKE_CASE : Optional[Any] = mask_patch_size
__SCREAMING_SNAKE_CASE : List[str] = model_patch_size
__SCREAMING_SNAKE_CASE : List[Any] = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('''Input size must be divisible by mask patch size''' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('''Mask patch size must be divisible by model patch size''' )
__SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size
__SCREAMING_SNAKE_CASE : str = self.mask_patch_size // self.model_patch_size
__SCREAMING_SNAKE_CASE : int = self.rand_size**2
__SCREAMING_SNAKE_CASE : Any = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self :List[str] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Dict = np.random.permutation(self.token_count )[: self.mask_count]
__SCREAMING_SNAKE_CASE : int = np.zeros(self.token_count , dtype=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = 1
__SCREAMING_SNAKE_CASE : str = mask.reshape((self.rand_size, self.rand_size) )
__SCREAMING_SNAKE_CASE : Optional[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = torch.stack([example['''pixel_values'''] for example in examples] )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.stack([example['''mask'''] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
__SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__SCREAMING_SNAKE_CASE : int = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
__SCREAMING_SNAKE_CASE : Optional[int] = ds['''train'''].train_test_split(data_args.train_val_split )
__SCREAMING_SNAKE_CASE : Union[str, Any] = split['''train''']
__SCREAMING_SNAKE_CASE : Dict = split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE : List[str] = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
__SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowercase__ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
__SCREAMING_SNAKE_CASE : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowercase__ , '''decoder_type''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = '''simmim'''
# adapt config
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_args.image_size if model_args.image_size is not None else config.image_size
__SCREAMING_SNAKE_CASE : str = model_args.patch_size if model_args.patch_size is not None else config.patch_size
__SCREAMING_SNAKE_CASE : Optional[int] = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
__SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE : Any = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForMaskedImageModeling.from_config(lowercase__ )
if training_args.do_train:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ds['''train'''].column_names
else:
__SCREAMING_SNAKE_CASE : List[str] = ds['''validation'''].column_names
if data_args.image_column_name is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''image'''
elif "img" in column_names:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''img'''
else:
__SCREAMING_SNAKE_CASE : Optional[int] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
__SCREAMING_SNAKE_CASE : int = Compose(
[
Lambda(lambda lowercase__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
__SCREAMING_SNAKE_CASE : int = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = [transforms(lowercase__ ) for image in examples[image_column_name]]
__SCREAMING_SNAKE_CASE : Any = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE : Any = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Initialize our trainer
__SCREAMING_SNAKE_CASE : List[str] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE : int = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE : List[str] = last_checkpoint
__SCREAMING_SNAKE_CASE : Any = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__SCREAMING_SNAKE_CASE : List[str] = trainer.evaluate()
trainer.log_metrics('''eval''' , lowercase__ )
trainer.save_metrics('''eval''' , lowercase__ )
# Write model card and (optionally) push to hub
__SCREAMING_SNAKE_CASE : List[Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
if __name__ == "__main__":
main()
| 696 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int:
if return_pvalue:
__SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
| 696 | 1 |
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
# Return True if there is node that has not iterated.
__SCREAMING_SNAKE_CASE : int = [False] * len(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = []
queue.append(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = True
while queue:
__SCREAMING_SNAKE_CASE : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : Optional[int] = u
return visited[t]
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
# This array is filled by BFS and to store path
__SCREAMING_SNAKE_CASE : Any = [-1] * (len(lowercase__ ))
__SCREAMING_SNAKE_CASE : Optional[int] = 0
while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = float('''Inf''' )
__SCREAMING_SNAKE_CASE : List[Any] = sink
while s != source:
# Find the minimum value in select path
__SCREAMING_SNAKE_CASE : List[str] = min(lowercase__ , graph[parent[s]][s] )
__SCREAMING_SNAKE_CASE : Tuple = parent[s]
max_flow += path_flow
__SCREAMING_SNAKE_CASE : Optional[Any] = sink
while v != source:
__SCREAMING_SNAKE_CASE : Dict = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__SCREAMING_SNAKE_CASE : List[str] = parent[v]
return max_flow
__lowerCAmelCase : str =[
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__lowerCAmelCase ,__lowerCAmelCase : List[Any] =0, 5
print(ford_fulkerson(graph, source, sink))
| 696 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : int ={
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2}
__lowerCAmelCase : Union[str, Any] ={}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer
def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
__SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case
__SCREAMING_SNAKE_CASE : str = strip_accents
__SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars
__SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = do_lower_case
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
__SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 696 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Dict ='▁'
__lowerCAmelCase : Any =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = BigBirdTokenizer
SCREAMING_SNAKE_CASE__ : str = BigBirdTokenizerFast
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : List[str] = True
def __magic_name__( self :str ) -> Any:
super().setUp()
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = '''<s>'''
__SCREAMING_SNAKE_CASE : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(lowerCAmelCase__ ) , 1_004 )
def __magic_name__( self :str ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __magic_name__( self :Optional[Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : int = '''I was born in 92000, and this is falsé.'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = BigBirdTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [285, 46, 10, 170, 382] , )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__SCREAMING_SNAKE_CASE : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __magic_name__( self :Tuple ) -> Optional[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def __magic_name__( self :Tuple ) -> List[str]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''Hello World!'''
__SCREAMING_SNAKE_CASE : Optional[int] = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__SCREAMING_SNAKE_CASE : Tuple = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@require_torch
@slow
def __magic_name__( self :List[Any] ) -> Dict:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__SCREAMING_SNAKE_CASE : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10]
__SCREAMING_SNAKE_CASE : int = ''' '''.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.big_tokenizer.encode_plus(lowerCAmelCase__ , return_tensors='''pt''' , return_token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = BigBirdConfig(attention_type='''original_full''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = BigBirdModel(lowerCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCAmelCase__ )
model(**lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__SCREAMING_SNAKE_CASE : Any = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def __magic_name__( self :Any ) -> Optional[int]:
# fmt: off
__SCREAMING_SNAKE_CASE : str = {'''input_ids''': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 696 |
import os
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = len(grid[0] )
__SCREAMING_SNAKE_CASE : str = len(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Dict = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(lowercase__ ):
for j in range(n_rows - 3 ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__SCREAMING_SNAKE_CASE : Union[str, Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__SCREAMING_SNAKE_CASE : Any = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__SCREAMING_SNAKE_CASE : Optional[int] = max(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if max_product > largest:
__SCREAMING_SNAKE_CASE : Tuple = max_product
return largest
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Optional[int] = []
with open(os.path.dirname(lowercase__ ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
__SCREAMING_SNAKE_CASE : str = [[int(lowercase__ ) for i in grid[j]] for j in range(len(lowercase__ ) )]
return largest_product(lowercase__ )
if __name__ == "__main__":
print(solution())
| 696 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = XCLIPTextConfig()
# derive patch size from model name
__SCREAMING_SNAKE_CASE : str = model_name.find('''patch''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPVisionConfig(patch_size=lowercase__ , num_frames=lowercase__ )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Dict = 768
__SCREAMING_SNAKE_CASE : Any = 3072
__SCREAMING_SNAKE_CASE : str = 12
__SCREAMING_SNAKE_CASE : int = 1024
__SCREAMING_SNAKE_CASE : int = 4096
__SCREAMING_SNAKE_CASE : Dict = 16
__SCREAMING_SNAKE_CASE : Tuple = 24
__SCREAMING_SNAKE_CASE : Dict = 768
__SCREAMING_SNAKE_CASE : List[Any] = 3072
if model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : int = 336
__SCREAMING_SNAKE_CASE : Optional[int] = XCLIPConfig.from_text_vision_configs(lowercase__ , lowercase__ )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Tuple = 768
return config
def _UpperCamelCase ( lowercase__ ):
# text encoder
if name == "token_embedding.weight":
__SCREAMING_SNAKE_CASE : str = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__SCREAMING_SNAKE_CASE : int = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__SCREAMING_SNAKE_CASE : int = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__SCREAMING_SNAKE_CASE : Any = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def _UpperCamelCase ( lowercase__ , lowercase__ ):
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : Optional[Any] = orig_state_dict.pop(lowercase__ )
if "attn.in_proj" in key:
__SCREAMING_SNAKE_CASE : Optional[int] = key.split('''.''' )
if key.startswith('''visual''' ):
__SCREAMING_SNAKE_CASE : List[Any] = key_split[3]
__SCREAMING_SNAKE_CASE : int = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Tuple = val[
:dim
]
__SCREAMING_SNAKE_CASE : List[Any] = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Dict = val[
-dim:
]
else:
if "weight" in key:
__SCREAMING_SNAKE_CASE : int = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : Any = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : List[Any] = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Dict = val[:dim]
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : str = val[-dim:]
elif key.startswith('''mit''' ):
__SCREAMING_SNAKE_CASE : List[str] = key_split[2]
__SCREAMING_SNAKE_CASE : str = config.vision_config.mit_hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : List[Any] = val[:dim, :]
__SCREAMING_SNAKE_CASE : List[str] = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Tuple = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Any = val[:dim]
__SCREAMING_SNAKE_CASE : Optional[int] = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : List[Any] = key_split[2]
__SCREAMING_SNAKE_CASE : str = config.text_config.hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : str = val[:dim, :]
__SCREAMING_SNAKE_CASE : Optional[int] = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : List[Any] = val[:dim]
__SCREAMING_SNAKE_CASE : Dict = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : Any = rename_key(lowercase__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__SCREAMING_SNAKE_CASE : Optional[Any] = val.T
__SCREAMING_SNAKE_CASE : str = val
return orig_state_dict
def _UpperCamelCase ( lowercase__ ):
if num_frames == 8:
__SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
__SCREAMING_SNAKE_CASE : Any = '''eating_spaghetti.npy'''
elif num_frames == 32:
__SCREAMING_SNAKE_CASE : Optional[int] = '''eating_spaghetti_32_frames.npy'''
__SCREAMING_SNAKE_CASE : Optional[Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=lowercase__ , repo_type='''dataset''' , )
__SCREAMING_SNAKE_CASE : List[str] = np.load(lowercase__ )
return list(lowercase__ )
def _UpperCamelCase ( lowercase__ , lowercase__=None , lowercase__=False ):
__SCREAMING_SNAKE_CASE : Optional[int] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__SCREAMING_SNAKE_CASE : int = model_to_url[model_name]
__SCREAMING_SNAKE_CASE : Any = 8
if "16-frames" in model_name:
__SCREAMING_SNAKE_CASE : str = 16
elif "shot" in model_name:
__SCREAMING_SNAKE_CASE : Optional[int] = 32
__SCREAMING_SNAKE_CASE : Dict = get_xclip_config(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = XCLIPModel(lowercase__ )
model.eval()
if "drive" in checkpoint_url:
__SCREAMING_SNAKE_CASE : Optional[int] = '''pytorch_model.bin'''
gdown.cached_download(lowercase__ , lowercase__ , quiet=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = torch.load(lowercase__ , map_location='''cpu''' )['''model''']
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase__ )['''model''']
__SCREAMING_SNAKE_CASE : Optional[int] = convert_state_dict(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = XCLIPModel(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__SCREAMING_SNAKE_CASE : Dict = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
__SCREAMING_SNAKE_CASE : Optional[int] = VideoMAEImageProcessor(size=lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : int = XCLIPProcessor(image_processor=lowercase__ , tokenizer=lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = prepare_video(lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=lowercase__ , return_tensors='''pt''' , padding=lowercase__ )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase__ )
# Verify outputs
__SCREAMING_SNAKE_CASE : List[str] = outputs.logits_per_video
__SCREAMING_SNAKE_CASE : int = logits_per_video.softmax(dim=1 )
print('''Probs:''' , lowercase__ )
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Any = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(lowercase__ , lowercase__ , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(lowercase__ , organization='''nielsr''' )
processor.push_to_hub(lowercase__ , organization='''nielsr''' )
slow_tokenizer.push_to_hub(lowercase__ , organization='''nielsr''' )
if __name__ == "__main__":
__lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase : Optional[Any] =parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 696 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _lowercase ( A__ ):
'''simple docstring'''
def __magic_name__( self :List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) )
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str:
__SCREAMING_SNAKE_CASE : Any = parent
__SCREAMING_SNAKE_CASE : Dict = batch_size
__SCREAMING_SNAKE_CASE : List[Any] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_size
__SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier
__SCREAMING_SNAKE_CASE : Dict = min_depth
__SCREAMING_SNAKE_CASE : List[str] = tf_padding
__SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier )
__SCREAMING_SNAKE_CASE : List[str] = output_stride
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[int] = num_labels
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = scope
def __magic_name__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__( self :Union[str, Any] ) -> Optional[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __magic_name__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __magic_name__( self :List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __magic_name__( self :Any ) -> Dict:
pass
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Tuple:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states
__SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> List[Any]:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
__SCREAMING_SNAKE_CASE : int = prepare_img()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 696 | 1 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''bart'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :Tuple , lowerCAmelCase__ :Tuple=50_265 , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=12 , lowerCAmelCase__ :List[str]=4_096 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :Optional[int]=0.0 , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :Union[str, Any]=0.0 , lowerCAmelCase__ :Optional[int]=0.0 , lowerCAmelCase__ :Optional[Any]=0.02 , lowerCAmelCase__ :Optional[int]=0.0 , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[Any]=2 , lowerCAmelCase__ :str=2 , **lowerCAmelCase__ :List[str] , ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : str = max_position_embeddings
__SCREAMING_SNAKE_CASE : Any = d_model
__SCREAMING_SNAKE_CASE : List[str] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE : Tuple = encoder_layers
__SCREAMING_SNAKE_CASE : str = encoder_attention_heads
__SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE : List[str] = decoder_layers
__SCREAMING_SNAKE_CASE : Optional[int] = decoder_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
__SCREAMING_SNAKE_CASE : Dict = activation_dropout
__SCREAMING_SNAKE_CASE : List[Any] = activation_function
__SCREAMING_SNAKE_CASE : Dict = init_std
__SCREAMING_SNAKE_CASE : str = encoder_layerdrop
__SCREAMING_SNAKE_CASE : Tuple = decoder_layerdrop
__SCREAMING_SNAKE_CASE : Optional[int] = classifier_dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
__SCREAMING_SNAKE_CASE : List[Any] = encoder_layers
__SCREAMING_SNAKE_CASE : str = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
class _lowercase ( A__ ):
'''simple docstring'''
@property
def __magic_name__( self :str ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE : Tuple = {0: '''batch'''}
__SCREAMING_SNAKE_CASE : int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 1: '''decoder_sequence'''}
__SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__SCREAMING_SNAKE_CASE : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_layers
for i in range(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __magic_name__( self :int ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : Tuple = super().outputs
else:
__SCREAMING_SNAKE_CASE : Tuple = super(lowerCAmelCase__ , self ).outputs
if self.use_past:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE : Tuple = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE : List[str] = dict(**lowerCAmelCase__ , **lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = common_inputs['''input_ids'''].shape
__SCREAMING_SNAKE_CASE : Optional[int] = common_inputs['''decoder_input_ids'''].shape[1]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.num_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE : Optional[int] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ )] , dim=1 )
__SCREAMING_SNAKE_CASE : Tuple = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
__SCREAMING_SNAKE_CASE : Tuple = min(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = max(lowerCAmelCase__ , lowerCAmelCase__ ) - min_num_layers
__SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCAmelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
torch.zeros(lowerCAmelCase__ ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE : List[str] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCAmelCase__ , lowerCAmelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) )
return common_inputs
def __magic_name__( self :str , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
__SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE : int = seqlen + 2
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.num_layers
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE : Optional[int] = common_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE : List[str] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__ )] , dim=1 )
__SCREAMING_SNAKE_CASE : List[str] = [
(torch.zeros(lowerCAmelCase__ ), torch.zeros(lowerCAmelCase__ )) for _ in range(lowerCAmelCase__ )
]
return common_inputs
def __magic_name__( self :int , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.num_special_tokens_to_add(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
lowerCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE : Optional[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE : str = dict(tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ ) )
return common_inputs
def __magic_name__( self :List[Any] , lowerCAmelCase__ :PreTrainedTokenizer , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :int = -1 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
elif self.task == "causal-lm":
__SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__ )
return common_inputs
def __magic_name__( self :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[int] ) -> Any:
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE : List[str] = super()._flatten_past_key_values_(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Tuple = super(lowerCAmelCase__ , self )._flatten_past_key_values_(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
| 696 |
import os
from datetime import datetime as dt
from github import Github
__lowerCAmelCase : List[Any] =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
__SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 696 | 1 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.