| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _a ( snake_case_ ):
_UpperCamelCase: Dict = ["pixel_values"]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , lowercase_ = True , **lowercase_ , ) -> None:
super().__init__(**lowercase_ )
lowerCAmelCase : int = size if size is not None else {"""height""": 384, """width""": 384}
lowerCAmelCase : Dict = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase : str = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Any = resample
lowerCAmelCase : str = do_rescale
lowerCAmelCase : List[Any] = rescale_factor
lowerCAmelCase : Tuple = do_normalize
lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase : Union[str, Any] = do_convert_rgb
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
lowerCAmelCase : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
lowerCAmelCase : int = (size["""height"""], size["""width"""])
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Optional[Any]:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
lowerCAmelCase : Tuple = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : List[str] = resample if resample is not None else self.resample
lowerCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCAmelCase : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase : List[Any] = size if size is not None else self.size
lowerCAmelCase : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase : List[Any] = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase : Any = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase : Tuple = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase : Optional[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase : Tuple = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase : Tuple = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase : List[str] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase : List[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowercase_ )
return encoded_outputs
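The processor above chains convert-to-RGB, resize, rescale, and normalize. As a rough illustration of what those steps do to one image, here is a minimal plain-numpy sketch; the 384x384 target and the CLIP mean/std are the class defaults above, while the helper name and structure are ours, not the library code:

```python
import numpy as np
from PIL import Image

# CLIP normalization constants, matching OPENAI_CLIP_MEAN / OPENAI_CLIP_STD above
CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)

def preprocess_sketch(image: Image.Image) -> np.ndarray:
    """Illustrative resize -> rescale -> normalize pipeline, not the library code."""
    image = image.convert("RGB").resize((384, 384), Image.BICUBIC)  # do_convert_rgb + do_resize
    array = np.asarray(image).astype(np.float32) * (1 / 255)        # do_rescale
    array = (array - CLIP_MEAN) / CLIP_STD                          # do_normalize
    return array.transpose(2, 0, 1)                                 # ChannelDimension.FIRST
```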
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
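The function implements the Lucas-Lehmer primality test for Mersenne numbers 2**p - 1. A small usage sketch, purely illustrative: the Mersenne-prime exponents below 20 are 2, 3, 5, 7, 13, 17, and 19, so those are the expected hits.

```python
for p in range(2, 20):
    if lucas_lehmer_test(p):
        print(f"2**{p} - 1 = {(1 << p) - 1} is prime")
```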
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : List[Any] =get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Tuple = AlbertTokenizer
_UpperCamelCase: Any = AlbertTokenizerFast
_UpperCamelCase: List[Any] = True
_UpperCamelCase: Optional[int] = True
_UpperCamelCase: Optional[int] = True
def _snake_case ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : List[str] = AlbertTokenizer(lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , lowercase_ ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = """this is a test"""
lowerCAmelCase : str = """this is a test"""
return input_text, output_text
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = """<pad>"""
lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(lowercase_ ) , 30000 )
def _snake_case ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ) -> str:
if not self.test_rust_tokenizer:
return
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Any = self.get_rust_tokenizer()
lowerCAmelCase : List[Any] = """I was born in 92000, and this is falsé."""
lowerCAmelCase : Tuple = tokenizer.tokenize(lowercase_ )
lowerCAmelCase : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
lowerCAmelCase : Dict = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
lowerCAmelCase : List[str] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Any = tokenizer.encode(lowercase_ )
lowerCAmelCase : Dict = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : Dict = AlbertTokenizer(lowercase_ , keep_accents=lowercase_ )
lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase_ , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [48, 25, 21, 1289] )
lowerCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowercase_ , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _snake_case ( self ) -> str:
lowerCAmelCase : str = AlbertTokenizer(lowercase_ )
lowerCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" )
lowerCAmelCase : Tuple = tokenizer.encode("""multi-sequence build""" )
lowerCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowercase_ )
lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _snake_case ( self ) -> Optional[int]:
# fmt: off
lowerCAmelCase : Optional[int] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase : List[Any] ={
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] =[
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
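`_LazyModule` defers the heavy torch imports until a name from `_import_structure` is actually accessed. A minimal sketch of the same idea using PEP 562 module-level `__getattr__`; this is not transformers' implementation, just the pattern it relies on:

```python
import importlib

_import_structure = {"modeling_nezha": ["NezhaModel", "NezhaForMaskedLM"]}
_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}

def __getattr__(name):
    # Import the submodule only on first access to one of its names.
    if name in _name_to_module:
        submodule = importlib.import_module("." + _name_to_module[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```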
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "llama"
_UpperCamelCase: List[str] = ["past_key_values"]
def __init__( self , lowercase_=32000 , lowercase_=4096 , lowercase_=11008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.0_2 , lowercase_=1e-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Any = num_key_value_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : int = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def _snake_case ( self ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("""type""" , lowercase_ )
lowerCAmelCase : Dict = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : Union[str, Any] =16
lowerCAmelCase : Optional[Any] =32
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 1_6 ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase : List[Any] = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(SCREAMING_SNAKE_CASE__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase : Union[str, Any] = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase : Optional[Any] = datasets.map(
SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase : List[Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(SCREAMING_SNAKE_CASE__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase : Any = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase : Union[str, Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase : Union[str, Any] = 8
else:
lowerCAmelCase : Dict = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE__ ,padding="""longest""" ,max_length=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
lowerCAmelCase : List[Any] = DataLoader(
tokenized_datasets["""train"""] ,shuffle=SCREAMING_SNAKE_CASE__ ,collate_fn=SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=SCREAMING_SNAKE_CASE__ ,collate_fn=SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase : Optional[int] =mocked_dataloaders # noqa: F811
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,SCREAMING_SNAKE_CASE__ ) == "1":
lowerCAmelCase : str = 2
# New Code #
lowerCAmelCase : List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase : Union[str, Any] = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase : Optional[Any] = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=SCREAMING_SNAKE_CASE__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase : Optional[int] = config["""lr"""]
lowerCAmelCase : int = int(config["""num_epochs"""] )
lowerCAmelCase : str = int(config["""seed"""] )
lowerCAmelCase : List[Any] = int(config["""batch_size"""] )
lowerCAmelCase : List[str] = evaluate.load("""glue""" ,"""mrpc""" )
set_seed(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = get_dataloaders(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=SCREAMING_SNAKE_CASE__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase : Optional[Any] = AdamW(params=model.parameters() ,lr=SCREAMING_SNAKE_CASE__ )
# Instantiate scheduler
lowerCAmelCase : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__ ,num_warmup_steps=1_0_0 ,num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase : List[Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE__ ):
model.train()
with LocalSGD(
accelerator=SCREAMING_SNAKE_CASE__ ,model=SCREAMING_SNAKE_CASE__ ,local_sgd_steps=SCREAMING_SNAKE_CASE__ ,enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Any = output.loss
accelerator.backward(SCREAMING_SNAKE_CASE__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[str] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase : str = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__ ,references=SCREAMING_SNAKE_CASE__ ,)
lowerCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=SCREAMING_SNAKE_CASE__ ,default=SCREAMING_SNAKE_CASE__ ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" ,type=SCREAMING_SNAKE_CASE__ ,default=1 ,help="""The number of minibatches to be ran before gradients are accumulated.""" ,)
parser.add_argument(
"""--local_sgd_steps""" ,type=SCREAMING_SNAKE_CASE__ ,default=8 ,help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
lowerCAmelCase : Optional[int] = parser.parse_args()
lowerCAmelCase : Optional[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
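LocalSGD trades the usual per-step gradient all-reduce for periodic parameter synchronization: each worker trains locally and, every `local_sgd_steps` optimizer steps, parameters are averaged across workers. A conceptual sketch of that synchronization step, assuming an initialized process group; this is not accelerate's implementation:

```python
import torch.distributed as dist

def average_parameters(model):
    """Replace each worker's parameters with the cross-worker average."""
    world_size = dist.get_world_size()
    for param in model.parameters():
        dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
        param.data /= world_size
```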
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: int = "swin"
_UpperCamelCase: str = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=32 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(**lowercase_ )
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[Any] = embed_dim
lowerCAmelCase : str = depths
lowerCAmelCase : List[str] = len(lowercase_ )
lowerCAmelCase : Any = num_heads
lowerCAmelCase : str = window_size
lowerCAmelCase : List[str] = mlp_ratio
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = drop_path_rate
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = use_absolute_embeddings
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Any = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase : Dict = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
class _a ( snake_case_ ):
_UpperCamelCase: int = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-4
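The `hidden_size` computed in the Swin config above, `int(embed_dim * 2 ** (len(depths) - 1))`, reflects Swin doubling its channel width at every stage. A worked example with the Swin-Tiny defaults from the `__init__` signature:

```python
embed_dim, depths = 96, [2, 2, 6, 2]      # defaults from the config above
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768                 # channel dim after the last stage
```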
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCAmelCase : Union[str, Any] = HfArgumentParser(InitializationArguments)
lowerCAmelCase : Any = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCAmelCase : Any = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
lowerCAmelCase : str = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCAmelCase : Any = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
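A pinned-version table like this is typically consumed by `setup.py` to build `install_requires` and extras. A sketch, assuming the dict above is bound to a name like `deps` (transformers' own `setup.py` uses a similar `deps_list` helper):

```python
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]

install_requires = deps_list(
    "filelock", "numpy", "packaging", "pyyaml", "regex", "requests", "tokenizers", "tqdm"
)
extras = {"torch": deps_list("torch"), "vision": deps_list("Pillow")}
```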
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    '''simple docstring'''
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
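A quick sanity check of `new_generation` using the blinker, a period-2 oscillator: one step turns the vertical bar horizontal, a second step restores it.

```python
flipped = new_generation(BLINKER)
assert flipped == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(flipped) == BLINKER
```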
def nor_gate(input_a: int, input_b: int) -> int:
    '''simple docstring'''
    return int(input_a == input_b == 0)


def main() -> None:
    '''simple docstring'''
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowerCAmelCase : Optional[int] =get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase : Any =get_tests_dir('fixtures/vocab.json')
lowerCAmelCase : Union[str, Any] =get_tests_dir('fixtures')
class _a ( unittest.TestCase ):
_UpperCamelCase: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : Optional[Any] = 0
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Tuple = WavaVecaConfig()
lowerCAmelCase : Dict = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
lowerCAmelCase : List[Any] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
copyfile(lowercase_ , os.path.join(lowercase_ , """vocab.json""" ) )
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor()
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCAmelCase : Tuple = WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowercase_ , lowercase_ ) , """r""" ) as f:
lowerCAmelCase : Optional[Any] = json.load(lowercase_ )
config_dict.pop("""processor_class""" )
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f:
f.write(json.dumps(lowercase_ ) )
lowerCAmelCase : int = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Tuple = WavaVecaFeatureExtractor()
lowerCAmelCase : int = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowerCAmelCase : Dict = WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowercase_ , lowercase_ ) , """r""" ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
config_dict.pop("""processor_class""" )
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f:
f.write(json.dumps(lowercase_ ) )
lowerCAmelCase : List[str] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : List[str] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(lowercase_ )
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , """vocab.json""" ) )
# create empty sample processor
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" ) as f:
f.write("""{}""" )
lowerCAmelCase : int = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Dict:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ )
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowerCAmelCase : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowerCAmelCase : Union[str, Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowerCAmelCase : List[Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
lowerCAmelCase : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _snake_case ( self ) -> str:
try:
AutoConfig.register("""custom""" , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoProcessor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase : List[Any] = os.path.join(lowercase_ , """vocab.txt""" )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase : Any = CustomTokenizer(lowercase_ )
lowerCAmelCase : Tuple = CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowercase_ )
lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Dict:
class _a ( snake_case_ ):
_UpperCamelCase: Any = False
class _a ( snake_case_ ):
_UpperCamelCase: Any = False
class _a ( snake_case_ ):
_UpperCamelCase: Any = "AutoFeatureExtractor"
_UpperCamelCase: Optional[Any] = "AutoTokenizer"
_UpperCamelCase: List[Any] = False
try:
AutoConfig.register("""custom""" , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local classes.
lowerCAmelCase : List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCAmelCase : List[str] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Dict:
lowerCAmelCase : int = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _a ( unittest.TestCase ):
_UpperCamelCase: Any = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _snake_case ( cls ) -> Tuple:
lowerCAmelCase : Optional[int] = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _snake_case ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : Dict = WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , """test-processor""" ) , push_to_hub=lowercase_ , use_auth_token=self._token )
lowerCAmelCase : List[Any] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[int] = WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , """test-processor-org""" ) , push_to_hub=lowercase_ , use_auth_token=self._token , organization="""valid_org""" , )
lowerCAmelCase : str = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        # `lowercase_` is an obfuscation artifact: the original loads the feature
        # extractor from a local sample config directory.
        feature_extractor = CustomFeatureExtractor.from_pretrained(lowercase_)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
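# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original test module; downloads a
# checkpoint, so it needs network access): the save/reload round-trip the
# tests above verify against the Hub, done locally.
if __name__ == "__main__":
    import tempfile

    from transformers import Wav2Vec2Processor

    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    with tempfile.TemporaryDirectory() as tmp_dir:
        processor.save_pretrained(tmp_dir)
        reloaded = Wav2Vec2Processor.from_pretrained(tmp_dir)
    print(reloaded.__class__.__name__)
# ---------------------------------------------------------------------------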
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
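# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original file): `_LazyModule` defers the
# heavy torch/vision imports above until an attribute is first accessed. The
# same deferred-attribute idea can be reproduced with a PEP 562 style lookup,
# sketched here with a stand-in loader:
import importlib


def _demo_lazy_getattr(name):
    # Resolve an attribute to a real import only on first access.
    _lazy_targets = {"sqrt": ("math", "sqrt")}
    if name in _lazy_targets:
        module_name, attr = _lazy_targets[name]
        return getattr(importlib.import_module(module_name), attr)
    raise AttributeError(name)


if __name__ == "__main__":
    print(_demo_lazy_getattr("sqrt")(16.0))  # `math` is only imported here
# ---------------------------------------------------------------------------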
| 693 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    """Return the sum of the perimeters of all almost-equilateral Heronian
    triangles (sides a, a, a +/- 1 with integral area) whose perimeter does
    not exceed `max_perimeter`, generated by the known linear recurrence."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
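# ---------------------------------------------------------------------------
# Hedged cross-check (not part of the original solution): brute-force the same
# sum for a small bound with exact integer arithmetic. For sides (a, a, c) with
# c = a +/- 1, Heron's formula gives 16 * area^2 = p * (p - 2a)^2 * (p - 2c),
# where p is the perimeter; the area is integral iff that quantity is a perfect
# square whose root is divisible by 4.
import math


def brute_force_perimeters_sum(max_perimeter: int) -> int:
    total = 0
    a = 2
    while 3 * a - 1 <= max_perimeter:
        for c in (a - 1, a + 1):
            p = 2 * a + c
            if p > max_perimeter:
                continue
            sq16 = p * (p - 2 * a) * (p - 2 * a) * (p - 2 * c)
            root = math.isqrt(sq16)
            if root > 0 and root * root == sq16 and root % 4 == 0:
                total += p
        a += 1
    return total


if __name__ == "__main__":
    # Both values should agree for small bounds (3688 for 10**4).
    print(brute_force_perimeters_sum(10**4), solution(10**4))
# ---------------------------------------------------------------------------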
| 705 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from inputs."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get a character from the keyboard, translating escape sequences for arrow keys."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
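# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): reading one key press
# in an interactive terminal. Arrow keys come back with ARROW_KEY_FLAG added to
# their character code.
if __name__ == "__main__":
    print("Press a single key...")
    key = get_character()
    if isinstance(key, str) and key and ord(key[0]) >= ARROW_KEY_FLAG:
        print("arrow key code:", ord(key[0]) - ARROW_KEY_FLAG)
    else:
        print("key:", repr(key))
# ---------------------------------------------------------------------------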
| 693 | 0 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
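# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): exercising the patience
# mechanism with a tiny randomly initialized config, so no weights are
# downloaded. The outputs are meaningless; this only shows the control flow.
if __name__ == "__main__":
    from transformers import BertConfig

    config = BertConfig(
        hidden_size=32, num_hidden_layers=4, num_attention_heads=2, intermediate_size=64, num_labels=2
    )
    model = BertForSequenceClassificationWithPabee(config)
    model.eval()
    model.bert.set_patience(2)  # stop once 2 consecutive layers agree

    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        logits = model(input_ids=input_ids)[0]
    print(logits.shape)
    model.bert.log_stats()
# ---------------------------------------------------------------------------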
| 706 |
# Imports
import numpy as np
class _a :
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def _snake_case ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[int]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
lowerCAmelCase : int = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
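# ---------------------------------------------------------------------------
# Hedged runnable example (not part of the original class): the NDVI formula
# used above, (nir - red) / (nir + red), applied to toy reflectance bands.
if __name__ == "__main__":
    demo_red = np.array([[26.0, 34.0], [31.0, 28.0]])
    demo_nir = np.array([[128.0, 140.0], [135.0, 129.0]])
    calc = _a(red=demo_red, nir=demo_nir)  # `_a` is the obfuscated class name
    print(calc.ndvi())
    print(calc.calculation("NDVI", red=demo_red, nir=demo_nir))
# ---------------------------------------------------------------------------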
| 693 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
# NOTE: the unreferenced function names below (`load_vqgan`, `reconstruct_with_vqgan`,
# `load_model`) and the `strict=` flag follow the taming-transformers notebook
# utilities this file mirrors; they are reconstructions, not verbatim originals.
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
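# ---------------------------------------------------------------------------
# Hedged runnable example (not part of the original utilities): the
# string-to-object helpers work for any importable target, so they can be
# demonstrated without VQGAN checkpoints.
if __name__ == "__main__":
    od_class = get_obj_from_str("collections.OrderedDict")
    print(od_class([("a", 1)]))

    demo_cfg = OmegaConf.create({"target": "collections.OrderedDict", "params": {"a": 1, "b": 2}})
    print(instantiate_from_config(demo_cfg))
# ---------------------------------------------------------------------------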
| 707 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the API URL only yields a redirect, so
    fetch the redirect target and save the zip under `output_dir`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact zip files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and the tests it appears in, sorted by frequency."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test path like `tests/models/<model>/...::test_x`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count errors per model, sorted by the total number of errors."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
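# ---------------------------------------------------------------------------
# Example invocation (hedged: the script name and run id are placeholders):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 123456789 \
#       --output_dir ci_artifacts \
#       --token $GITHUB_TOKEN
#
# This downloads every artifact of the run, then writes job_links.json,
# artifacts.json, errors.json and the two markdown tables to --output_dir.
# ---------------------------------------------------------------------------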
| 693 | 0 |
"""Backtracking search for a Hamiltonian cycle in an undirected graph given as
an adjacency matrix."""


def valid_connection(graph, next_ver, curr_ind, path):
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph, path, curr_ind):
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph, start_index=0):
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
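# ---------------------------------------------------------------------------
# Hedged runnable example (not part of the original module): a 5-vertex graph
# with a known Hamiltonian cycle; the search below returns [0, 1, 2, 4, 3, 0].
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))
# ---------------------------------------------------------------------------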
| 708 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 693 | 0 |
"""A NAND gate returns 0 only when both inputs are 1; otherwise it returns 1."""


def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 709 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
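# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file), using the released
# `transformers` API this module implements. Constructing a config needs no
# model weights; `attribute_map` makes `hidden_size` an alias of `d_model`.
if __name__ == "__main__":
    from transformers import DetrConfig

    demo_config = DetrConfig(d_model=128, encoder_attention_heads=4)
    print(demo_config.hidden_size)  # 128, via the d_model alias
    print(demo_config.num_attention_heads)  # 4, via the encoder_attention_heads alias
# ---------------------------------------------------------------------------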
| 693 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class _a ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        # NOTE: the exception types below are reconstructed; the original source only
        # asserted that some exception is raised for each malformed call.
        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=lowercase_ , )
self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=lowercase_ , )
self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
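# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original test module; downloads the
# tiny test checkpoint used above, so it needs network access):
if __name__ == "__main__":
    classifier = pipeline(
        "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad"
    )
    print(classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"]))
# ---------------------------------------------------------------------------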
| 710 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 693 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=3 , lowercase_=224 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , ) -> int:
lowerCAmelCase : List[str] = size if size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Dict = min_resolution
lowerCAmelCase : List[Any] = max_resolution
lowerCAmelCase : int = do_resize
lowerCAmelCase : Optional[Any] = size
lowerCAmelCase : Optional[int] = do_normalize
lowerCAmelCase : int = image_mean
lowerCAmelCase : List[str] = image_std
def _snake_case ( self ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: str = ViTImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = EfficientFormerImageProcessorTester(self )
@property
def _snake_case ( self ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
def _snake_case ( self ) -> Optional[int]:
pass
def _snake_case ( self ) -> Optional[Any]:
# Initialize image_processor
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processor(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _snake_case ( self ) -> Dict:
# Initialize image_processor
lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processor(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _snake_case ( self ) -> Optional[Any]:
# Initialize image_processor
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
lowerCAmelCase : Tuple = image_processor(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 0 |
class _a :
def __init__( self , lowercase_ ) -> int:
        # split the comma-separated input string into a list of number strings
lowerCAmelCase : Tuple = arr.split(""",""" )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : int = [int(self.array[0] )] * len(self.array )
lowerCAmelCase : Optional[int] = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
lowerCAmelCase : Any = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
lowerCAmelCase : Optional[int] = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
    lowerCAmelCase : Union[str, Any] =input('please input some comma-separated numbers: ')
    lowerCAmelCase : Optional[int] =SubArray(whole_array)
    lowerCAmelCase : Any =array.solve_sub_array()
    print('the result is:', re)
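# --- Hedged illustration (added; not part of the original file) ---
# solve_sub_array above is a two-list form of Kadane's algorithm for the
# maximum contiguous subarray sum; this single-pass sketch computes the
# same value and can serve as a cross-check.
def kadane_max_subarray(numbers):
    best = current = numbers[0]
    for value in numbers[1:]:
        current = max(value, current + value)  # extend the current run or restart here
        best = max(best, current)  # best run seen so far
    return best
assert kadane_max_subarray([1, -2, 4, -1, 2]) == 5  # 4 + (-1) + 2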
| 712 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
return result
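# --- Hedged usage sketch (added; not part of the original file) ---
# Assuming any diffusers UNet2DModel / DDPMScheduler pair; the pipeline class
# keeps the obfuscated name `_a` it carries in this snippet.
# from diffusers import DDPMScheduler, UNet2DModel
# unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
# pipe = _a(unet, DDPMScheduler())
# out = pipe()  # one denoising step on random noise, then the all-ones trick above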
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : int ={'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str =['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] =['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str =['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
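# --- Hedged illustration (added; not part of the original file) ---
# A minimal sketch of the deferral idea behind _LazyModule: map each public
# name to its submodule and import that submodule only on first access.
# (The real _LazyModule also wires itself into sys.modules, dir(), etc.)
def _lazy_lookup_sketch(name, package, structure):
    import importlib
    for submodule, attrs in structure.items():
        if name in attrs:
            return getattr(importlib.import_module("." + submodule, package), name)
    raise AttributeError(f"module {package!r} has no attribute {name!r}")
# e.g. _lazy_lookup_sketch("EncoderDecoderConfig", __name__,
#                          {"configuration_encoder_decoder": ["EncoderDecoderConfig"]})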
| 713 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
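# --- Hedged illustration (added; not part of the original test file) ---
# A minimal sketch of what a CONNECTION_FAILS-style simulation amounts to:
# patch requests.Session.request so every HTTP call raises immediately.
from contextlib import contextmanager
from unittest.mock import patch
@contextmanager
def _fake_offline_sketch():
    def _raise(*args, **kwargs):
        raise requests.exceptions.ConnectionError("simulated offline mode")
    with patch("requests.Session.request", _raise):
        yield
# with _fake_offline_sketch():
#     requests.get("https://huggingface.co")  # raises ConnectionError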
| 693 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase : Tuple =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
lowerCAmelCase : int =[
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = {}
with open(SCREAMING_SNAKE_CASE__ ,"""r""" ) as file:
for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Optional[Any] = line.strip()
if line:
lowerCAmelCase : Dict = line.split()
lowerCAmelCase : Dict = line_number
lowerCAmelCase : List[str] = words[0]
lowerCAmelCase : List[Any] = value
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
lowerCAmelCase : List[str] = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
lowerCAmelCase : List[str] = """param"""
if weight_type is not None and weight_type != "param":
lowerCAmelCase : Any = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).shape
elif weight_type is not None and weight_type == "param":
lowerCAmelCase : Tuple = hf_pointer
for attribute in hf_param_name.split(""".""" ):
lowerCAmelCase : int = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Any = shape_pointer.shape
# let's reduce dimension
lowerCAmelCase : int = value[0]
else:
lowerCAmelCase : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCAmelCase : List[str] = value
elif weight_type == "weight_g":
lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
lowerCAmelCase : str = value
elif weight_type == "bias":
lowerCAmelCase : List[str] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
lowerCAmelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = value
else:
lowerCAmelCase : Optional[int] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
lowerCAmelCase : str = """param"""
if weight_type is not None and weight_type != "param":
lowerCAmelCase : int = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowerCAmelCase : Optional[Any] = """.""".join([key, hf_param_name] )
else:
lowerCAmelCase : List[str] = key
lowerCAmelCase : Dict = value if """lm_head""" in full_key else value[0]
lowerCAmelCase : List[str] ={
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Dict = False
for key, mapped_key in MAPPING.items():
lowerCAmelCase : Optional[Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowerCAmelCase : Dict = True
if "*" in mapped_key:
lowerCAmelCase : Union[str, Any] = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2]
lowerCAmelCase : str = mapped_key.replace("""*""" ,SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
lowerCAmelCase : List[str] = """weight_g"""
elif "weight_v" in name:
lowerCAmelCase : Union[str, Any] = """weight_v"""
elif "bias" in name:
lowerCAmelCase : Optional[int] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase : List[Any] = """weight"""
else:
lowerCAmelCase : Tuple = None
if hf_dict is not None:
rename_dict(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else:
set_recursively(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return is_used
return is_used
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = []
lowerCAmelCase : List[str] = fairseq_model.state_dict()
lowerCAmelCase : int = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,hf_model.config.feat_extract_norm == """group""" ,)
lowerCAmelCase : List[Any] = True
else:
lowerCAmelCase : str = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = full_name.split("""conv_layers.""" )[-1]
lowerCAmelCase : List[Any] = name.split(""".""" )
lowerCAmelCase : Any = int(items[0] )
lowerCAmelCase : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCAmelCase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCAmelCase : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCAmelCase : str = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCAmelCase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
if config_path is not None:
lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Any = WavaVecaConfig()
if is_seq_class:
lowerCAmelCase : str = read_txt_into_dict(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = idalabel
lowerCAmelCase : List[Any] = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Any = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6_0_0_0 ,padding_value=0 ,do_normalize=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,)
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
elif is_finetuned:
if dict_path:
lowerCAmelCase : int = Dictionary.load(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase : Dict = target_dict.pad_index
lowerCAmelCase : Optional[int] = target_dict.bos_index
lowerCAmelCase : Optional[int] = target_dict.eos_index
lowerCAmelCase : Optional[Any] = len(target_dict.symbols )
lowerCAmelCase : Any = os.path.join(SCREAMING_SNAKE_CASE__ ,"""vocab.json""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ ,exist_ok=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase : Dict = 0
lowerCAmelCase : Any = 1
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE__ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=SCREAMING_SNAKE_CASE__ ,)
lowerCAmelCase : Tuple = True if config.feat_extract_norm == """layer""" else False
lowerCAmelCase : Any = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6_0_0_0 ,padding_value=0 ,do_normalize=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,)
lowerCAmelCase : Dict = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Optional[int] = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ )
if is_finetuned or is_seq_class:
lowerCAmelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
lowerCAmelCase : Optional[Any] = argparse.Namespace(task="""audio_pretraining""" )
lowerCAmelCase : Dict = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,not is_finetuned )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
lowerCAmelCase : List[Any] =parser.parse_args()
lowerCAmelCase : List[str] =not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
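# --- Hedged usage sketch (added; not part of the original script) ---
# Assumed invocation; the script name and all paths are placeholders:
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./converted \
#       --dict_path /path/to/dict.ltr.txt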
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
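# --- Hedged illustration (added; not part of the original test file) ---
# The shortest-edge resize rule that get_expected_values applies above,
# restated as a tiny standalone helper with a sanity check:
def _shortest_edge_resize_sketch(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge
assert _shortest_edge_resize_sketch(40, 20) == (36, 18)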
| 693 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _a ( snake_case_ ):
_UpperCamelCase: "DiagonalGaussianDistribution"
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: int = True
@register_to_config
def __init__( self , lowercase_ = 3 , lowercase_ = 3 , lowercase_ = ("DownEncoderBlock2D",) , lowercase_ = ("UpDecoderBlock2D",) , lowercase_ = (64,) , lowercase_ = 1 , lowercase_ = "silu" , lowercase_ = 4 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = 0.1_8_2_1_5 , ) -> Dict:
super().__init__()
# pass init params to Encoder
lowerCAmelCase : Dict = Encoder(
in_channels=lowercase_ , out_channels=lowercase_ , down_block_types=lowercase_ , block_out_channels=lowercase_ , layers_per_block=lowercase_ , act_fn=lowercase_ , norm_num_groups=lowercase_ , double_z=lowercase_ , )
# pass init params to Decoder
lowerCAmelCase : Union[str, Any] = Decoder(
in_channels=lowercase_ , out_channels=lowercase_ , up_block_types=lowercase_ , block_out_channels=lowercase_ , layers_per_block=lowercase_ , norm_num_groups=lowercase_ , act_fn=lowercase_ , )
lowerCAmelCase : Dict = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
lowerCAmelCase : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , 1 )
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : str = False
# only relevant if vae tiling is enabled
lowerCAmelCase : Optional[int] = self.config.sample_size
lowerCAmelCase : str = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
lowerCAmelCase : List[str] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
lowerCAmelCase : Union[str, Any] = 0.2_5
def _snake_case ( self , lowercase_ , lowercase_=False ) -> Union[str, Any]:
if isinstance(lowercase_ , (Encoder, Decoder) ):
lowerCAmelCase : Tuple = value
def _snake_case ( self , lowercase_ = True ) -> Dict:
lowerCAmelCase : Optional[int] = use_tiling
def _snake_case ( self ) -> List[str]:
self.enable_tiling(lowercase_ )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Optional[Any] = True
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Tuple = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _snake_case ( self ) -> Dict[str, AttentionProcessor]:
lowerCAmelCase : Dict = {}
def fn_recursive_add_processors(lowercase_ , lowercase_ , lowercase_ ):
if hasattr(lowercase_ , """set_processor""" ):
lowerCAmelCase : Optional[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowercase_ , lowercase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowercase_ , lowercase_ , lowercase_ )
return processors
def _snake_case ( self , lowercase_ ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowercase_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowercase_ , lowercase_ , lowercase_ ):
if hasattr(lowercase_ , """set_processor""" ):
if not isinstance(lowercase_ , lowercase_ ):
module.set_processor(lowercase_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowercase_ , lowercase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowercase_ , lowercase_ , lowercase_ )
def _snake_case ( self ) -> Optional[Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def _snake_case ( self , lowercase_ , lowercase_ = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowercase_ , return_dict=lowercase_ )
if self.use_slicing and x.shape[0] > 1:
lowerCAmelCase : Union[str, Any] = [self.encoder(lowercase_ ) for x_slice in x.split(1 )]
lowerCAmelCase : Union[str, Any] = torch.cat(lowercase_ )
else:
lowerCAmelCase : Dict = self.encoder(lowercase_ )
lowerCAmelCase : Tuple = self.quant_conv(lowercase_ )
lowerCAmelCase : List[Any] = DiagonalGaussianDistribution(lowercase_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowercase_ , return_dict=lowercase_ )
lowerCAmelCase : Tuple = self.post_quant_conv(lowercase_ )
lowerCAmelCase : Tuple = self.decoder(lowercase_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
@apply_forward_hook
def _snake_case ( self , lowercase_ , lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
lowerCAmelCase : Optional[Any] = [self._decode(lowercase_ ).sample for z_slice in z.split(1 )]
lowerCAmelCase : str = torch.cat(lowercase_ )
else:
lowerCAmelCase : List[str] = self._decode(lowercase_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
lowerCAmelCase : int = min(a.shape[2] , b.shape[2] , lowercase_ )
for y in range(lowercase_ ):
lowerCAmelCase : Dict = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Any:
lowerCAmelCase : Optional[Any] = min(a.shape[3] , b.shape[3] , lowercase_ )
for x in range(lowercase_ ):
lowerCAmelCase : List[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def _snake_case ( self , lowercase_ , lowercase_ = True ) -> AutoencoderKLOutput:
lowerCAmelCase : int = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
lowerCAmelCase : str = int(self.tile_latent_min_size * self.tile_overlap_factor )
lowerCAmelCase : Optional[Any] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
lowerCAmelCase : Optional[int] = []
for i in range(0 , x.shape[2] , lowercase_ ):
lowerCAmelCase : Union[str, Any] = []
for j in range(0 , x.shape[3] , lowercase_ ):
lowerCAmelCase : List[Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
lowerCAmelCase : List[str] = self.encoder(lowercase_ )
lowerCAmelCase : Optional[int] = self.quant_conv(lowercase_ )
row.append(lowercase_ )
rows.append(lowercase_ )
lowerCAmelCase : int = []
for i, row in enumerate(lowercase_ ):
lowerCAmelCase : List[Any] = []
for j, tile in enumerate(lowercase_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
lowerCAmelCase : Any = self.blend_v(rows[i - 1][j] , lowercase_ , lowercase_ )
if j > 0:
lowerCAmelCase : int = self.blend_h(row[j - 1] , lowercase_ , lowercase_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowercase_ , dim=3 ) )
lowerCAmelCase : Tuple = torch.cat(lowercase_ , dim=2 )
lowerCAmelCase : Dict = DiagonalGaussianDistribution(lowercase_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase : Union[str, Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
lowerCAmelCase : Any = int(self.tile_sample_min_size * self.tile_overlap_factor )
lowerCAmelCase : Optional[Any] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
lowerCAmelCase : List[str] = []
for i in range(0 , z.shape[2] , lowercase_ ):
lowerCAmelCase : int = []
for j in range(0 , z.shape[3] , lowercase_ ):
lowerCAmelCase : Any = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
lowerCAmelCase : int = self.post_quant_conv(lowercase_ )
lowerCAmelCase : Optional[Any] = self.decoder(lowercase_ )
row.append(lowercase_ )
rows.append(lowercase_ )
lowerCAmelCase : Union[str, Any] = []
for i, row in enumerate(lowercase_ ):
lowerCAmelCase : List[Any] = []
for j, tile in enumerate(lowercase_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
lowerCAmelCase : List[Any] = self.blend_v(rows[i - 1][j] , lowercase_ , lowercase_ )
if j > 0:
lowerCAmelCase : Optional[Any] = self.blend_h(row[j - 1] , lowercase_ , lowercase_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowercase_ , dim=3 ) )
lowerCAmelCase : List[Any] = torch.cat(lowercase_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = False , lowercase_ = True , lowercase_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase : List[Any] = sample
lowerCAmelCase : List[str] = self.encode(lowercase_ ).latent_dist
if sample_posterior:
lowerCAmelCase : Optional[Any] = posterior.sample(generator=lowercase_ )
else:
lowerCAmelCase : Optional[Any] = posterior.mode()
lowerCAmelCase : Dict = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
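# --- Hedged illustration (added; not part of the original file) ---
# blend_v / blend_h above cross-fade the overlapping strip of two adjacent
# tiles with linearly increasing weights; a tiny vertical-blend sketch:
if __name__ == "__main__":
    _top = torch.zeros(1, 1, 4, 1)  # bottom rows of the tile above
    _cur = torch.ones(1, 1, 4, 1)  # top rows of the current tile
    for _y in range(4):
        _w = _y / 4
        _cur[:, :, _y, :] = _top[:, :, -4 + _y, :] * (1 - _w) + _cur[:, :, _y, :] * _w
    print(_cur.flatten())  # approx. [0.00, 0.25, 0.50, 0.75]: a smooth ramp, no seam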
| 715 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
while b > 0:
if b & 1:
lowerCAmelCase : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
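# --- Hedged illustration (added; not part of the original file) ---
# Both helpers above implement double-and-add ("Russian peasant")
# multiplication, plain and modular; the same idea with explicit names and a
# reduction at every step, checked against Python's built-in operators:
def binary_multiply_mod(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:  # this bit of b contributes a * 2**bit_index
            res = (res + a) % c
        a = (a + a) % c  # double a for the next bit
        b >>= 1
    return res
assert binary_multiply_mod(7, 13, 5) == (7 * 13) % 5  # 91 % 5 == 1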
| 693 | 0 |
# Imports
import numpy as np
class _a :
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> List[Any]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
def _snake_case ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Union[str, Any]:
if red is not None:
lowerCAmelCase : str = red
if green is not None:
lowerCAmelCase : Optional[int] = green
if blue is not None:
lowerCAmelCase : Optional[int] = blue
if red_edge is not None:
lowerCAmelCase : Tuple = red_edge
if nir is not None:
lowerCAmelCase : Union[str, Any] = nir
return True
def _snake_case ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[int]:
self.set_matricies(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
lowerCAmelCase : int = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self ) -> Dict:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self ) -> Optional[Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self ) -> List[str]:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self ) -> Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self ) -> List[str]:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , lowercase_=0.0_8 , lowercase_=1.2_2 , lowercase_=0.0_3 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self ) -> Optional[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self ) -> Any:
return (self.nir / self.green) - 1
def _snake_case ( self ) -> List[Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self ) -> str:
return (self.red - self.blue) / self.red
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self ) -> Optional[Any]:
return self.nir - self.green
def _snake_case ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Tuple = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , lowercase_=0.1_6 ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , lowercase_=0.5 ) -> List[str]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def _snake_case ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self ) -> str:
return (self.red + self.green + self.blue) / 30.5
def _snake_case ( self ) -> Union[str, Any]:
return self.nir / self.red
def _snake_case ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self ) -> Optional[int]:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self ) -> List[str]:
return self.nir / self.red
def _snake_case ( self ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
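# Editorial sketch (added for illustration, not part of the original snippet):
# the block above is a family of normalized-difference vegetation indices; the
# NDVI variant is (nir - red) / (nir + red). A self-contained check with plain
# numpy arrays of reflectance values:
import numpy as np

def ndvi_reference(nir: np.ndarray, red: np.ndarray) -> np.ndarray:
    # Element-wise normalized difference; results fall in [-1, 1].
    return (nir - red) / (nir + red)

nir_band = np.array([[0.8, 0.6], [0.7, 0.9]])
red_band = np.array([[0.1, 0.2], [0.3, 0.1]])
print(ndvi_reference(nir_band, red_band))  # dense vegetation tends toward 1.0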
| 716 |
from math import factorial
class _a :
def __init__( self , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : Union[str, Any] = real
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = [1] * rank
else:
lowerCAmelCase : Any = rank
def __repr__( self ) -> int:
return (
f"""{self.real}+"""
f"""{'+'.join(str(lowercase_ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowercase_ )
def __add__( self , lowercase_ ) -> Tuple:
if not isinstance(lowercase_ , lowercase_ ):
return Dual(self.real + other , self.duals )
lowerCAmelCase : int = self.duals.copy()
lowerCAmelCase : Tuple = other.duals.copy()
if len(s_dual ) > len(o_dual ):
o_dual.extend([0] * (len(s_dual ) - len(o_dual )) )
elif len(s_dual ) < len(o_dual ):
s_dual.extend([0] * (len(o_dual ) - len(s_dual )) )
lowerCAmelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowercase_ )
_UpperCamelCase: List[Any] = __add__
def __sub__( self , lowercase_ ) -> Union[str, Any]:
return self + other * -1
def __mul__( self , lowercase_ ) -> Optional[int]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowercase_ )
lowerCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowercase_ )
_UpperCamelCase: str = __mul__
def __truediv__( self , lowercase_ ) -> Optional[Any]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowercase_ )
raise ValueError
def __floordiv__( self , lowercase_ ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowercase_ )
raise ValueError
def __pow__( self , n ) -> str:
if n < 0 or isinstance(n , float ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
lowerCAmelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not callable(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,(float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
lowerCAmelCase : List[Any] = Dual(SCREAMING_SNAKE_CASE__ ,1 )
lowerCAmelCase : Optional[Any] = func(SCREAMING_SNAKE_CASE__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _UpperCAmelCase ( y ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
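# Editorial sketch (added for illustration): the Dual class above implements
# forward-mode automatic differentiation with truncated dual numbers. The core
# idea with first-order duals a + b*eps, where eps**2 = 0, so the dual part of
# f(x + eps) equals f'(x):
class DualFirstOrder:
    def __init__(self, real, dual):
        self.real, self.dual = real, dual

    def __mul__(self, other):
        # (a + b*eps)(c + d*eps) = ac + (ad + bc)*eps, since eps**2 = 0.
        return DualFirstOrder(
            self.real * other.real,
            self.real * other.dual + self.dual * other.real,
        )

seed = DualFirstOrder(3.0, 1.0)   # represents x + eps at x = 3
out = seed * seed                 # f(x) = x**2, so f'(x) = 2x
print(out.real, out.dual)         # 9.0 6.0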
| 693 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[Any] = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = BlipImageProcessor()
lowerCAmelCase : Dict = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
lowerCAmelCase : List[str] = BlipaProcessor(lowercase_ , lowercase_ )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **lowercase_ ) -> List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).tokenizer
def _snake_case ( self , **lowercase_ ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def _snake_case ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> str:
lowerCAmelCase : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[str] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowerCAmelCase : Optional[int] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : int = self.get_tokenizer()
lowerCAmelCase : Any = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Optional[int] = image_processor(lowercase_ , return_tensors="""np""" )
lowerCAmelCase : Tuple = processor(images=lowercase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = self.get_tokenizer()
lowerCAmelCase : int = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase : Optional[int] = """lower newer"""
lowerCAmelCase : Tuple = processor(text=lowercase_ )
lowerCAmelCase : Dict = tokenizer(lowercase_ , return_token_type_ids=lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self ) -> Dict:
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : List[Any] = self.get_tokenizer()
lowerCAmelCase : Tuple = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase : Any = """lower newer"""
lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase : List[Any] = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def _snake_case ( self ) -> str:
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = self.get_tokenizer()
lowerCAmelCase : Optional[int] = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Union[str, Any] = processor.batch_decode(lowercase_ )
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def _snake_case ( self ) -> Any:
lowerCAmelCase : str = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : List[Any] = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowerCAmelCase : Optional[Any] = """lower newer"""
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Tuple = processor(text=lowercase_ , images=lowercase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
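# Editorial sketch (added for illustration; ToyProcessor is a hypothetical
# stand-in, not the transformers API): a multimodal processor is essentially a
# container that routes text to a tokenizer and images to an image processor,
# which is the dispatch behavior the tests above exercise:
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        out = {}
        if text is not None:
            out.update(self.tokenizer(text))
        if images is not None:
            out.update(self.image_processor(images))
        return out

toy = ToyProcessor(lambda t: {"input_ids": [len(t)]}, lambda i: {"pixel_values": i})
print(toy(text="hi", images=[0.5]))  # {'input_ids': [2], 'pixel_values': [0.5]}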
| 717 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_UpperCamelCase: List[Any] = ["keras_nlp"]
def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple:
requires_backends(self , ["""keras_nlp"""] )
| 693 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
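# Editorial sketch (added for illustration; module names are placeholders):
# the _LazyModule registration above defers heavy imports until an attribute
# is first accessed. The same lazy-import idea with only the standard library:
import importlib
import types

class LazySubmodules(types.ModuleType):
    def __init__(self, name, submodules):
        super().__init__(name)
        self._submodules = set(submodules)

    def __getattr__(self, item):
        if item in self._submodules:
            # The import happens only now, on first attribute access.
            return importlib.import_module(f"{self.__name__}.{item}")
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")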
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 693 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _a ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1.0 , lowercase_ = None , ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase : List[Any] = initial_learning_rate
lowerCAmelCase : List[str] = warmup_steps
lowerCAmelCase : Any = power
lowerCAmelCase : Dict = decay_schedule_fn
lowerCAmelCase : List[Any] = name
def __call__( self , lowercase_ ) -> List[Any]:
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowerCAmelCase : int = tf.cast(lowercase_ , tf.floataa )
lowerCAmelCase : List[str] = tf.cast(self.warmup_steps , tf.floataa )
lowerCAmelCase : int = global_step_float / warmup_steps_float
lowerCAmelCase : Tuple = self.initial_learning_rate * tf.math.pow(lowercase_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase_ , )
def _snake_case ( self ) -> int:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = 0.9 ,SCREAMING_SNAKE_CASE__ = 0.999 ,SCREAMING_SNAKE_CASE__ = 1e-8 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 0.0 ,SCREAMING_SNAKE_CASE__ = 1.0 ,SCREAMING_SNAKE_CASE__ = None ,):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=SCREAMING_SNAKE_CASE__ ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=SCREAMING_SNAKE_CASE__ ,)
if num_warmup_steps:
lowerCAmelCase : Dict = WarmUp(
initial_learning_rate=SCREAMING_SNAKE_CASE__ ,decay_schedule_fn=SCREAMING_SNAKE_CASE__ ,warmup_steps=SCREAMING_SNAKE_CASE__ ,)
if weight_decay_rate > 0.0:
lowerCAmelCase : Optional[int] = AdamWeightDecay(
learning_rate=SCREAMING_SNAKE_CASE__ ,weight_decay_rate=SCREAMING_SNAKE_CASE__ ,beta_a=SCREAMING_SNAKE_CASE__ ,beta_a=SCREAMING_SNAKE_CASE__ ,epsilon=SCREAMING_SNAKE_CASE__ ,clipnorm=SCREAMING_SNAKE_CASE__ ,global_clipnorm=SCREAMING_SNAKE_CASE__ ,exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] ,include_in_weight_decay=SCREAMING_SNAKE_CASE__ ,)
else:
lowerCAmelCase : int = tf.keras.optimizers.Adam(
learning_rate=SCREAMING_SNAKE_CASE__ ,beta_a=SCREAMING_SNAKE_CASE__ ,beta_a=SCREAMING_SNAKE_CASE__ ,epsilon=SCREAMING_SNAKE_CASE__ ,clipnorm=SCREAMING_SNAKE_CASE__ ,global_clipnorm=SCREAMING_SNAKE_CASE__ ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class _a ( snake_case_ ):
def __init__( self , lowercase_ = 0.0_0_1 , lowercase_ = 0.9 , lowercase_ = 0.9_9_9 , lowercase_ = 1e-7 , lowercase_ = False , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "AdamWeightDecay" , **lowercase_ , ) -> Any:
super().__init__(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
lowerCAmelCase : List[str] = weight_decay_rate
lowerCAmelCase : List[str] = include_in_weight_decay
lowerCAmelCase : int = exclude_from_weight_decay
@classmethod
def _snake_case ( cls , lowercase_ ) -> Tuple:
lowerCAmelCase : int = {"""WarmUp""": WarmUp}
return super(lowercase_ , cls ).from_config(lowercase_ , custom_objects=lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
super(lowercase_ , self )._prepare_local(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Optional[int] = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : List[str] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def _snake_case ( self , lowercase_ , lowercase_=None , **lowercase_ ) -> Dict:
lowerCAmelCase : Union[str, Any] = list(zip(*lowercase_ ) )
return super(lowercase_ , self ).apply_gradients(zip(lowercase_ , lowercase_ ) , name=lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowerCAmelCase : Optional[Any] = apply_state or {}
lowerCAmelCase : int = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowerCAmelCase : int = self._fallback_apply_state(lowercase_ , lowercase_ )
lowerCAmelCase : int = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_=None ) -> Optional[int]:
lowerCAmelCase : Dict = self._get_lr(var.device , var.dtype.base_dtype , lowercase_ )
lowerCAmelCase : int = self._decay_weights_op(lowercase_ , lowercase_ , lowercase_ )
with tf.control_dependencies([decay] ):
return super(lowercase_ , self )._resource_apply_dense(lowercase_ , lowercase_ , **lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> int:
lowerCAmelCase : List[str] = self._get_lr(var.device , var.dtype.base_dtype , lowercase_ )
lowerCAmelCase : List[Any] = self._decay_weights_op(lowercase_ , lowercase_ , lowercase_ )
with tf.control_dependencies([decay] ):
return super(lowercase_ , self )._resource_apply_sparse(lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Any:
lowerCAmelCase : int = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def _snake_case ( self , lowercase_ ) -> Dict:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowercase_ , lowercase_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowercase_ , lowercase_ ) is not None:
return False
return True
class _a ( snake_case_ ):
def __init__( self ) -> Optional[Any]:
lowerCAmelCase : int = []
lowerCAmelCase : Dict = None
@property
def _snake_case ( self ) -> Any:
if self._accum_steps is None:
lowerCAmelCase : Any = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def _snake_case ( self ) -> int:
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , lowercase_ ) -> Union[str, Any]:
if not self._gradients:
lowerCAmelCase : str = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowercase_ ) , trainable=lowercase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowercase_ ) != len(self._gradients ):
raise ValueError(f"""Expected {len(self._gradients )} gradients, but got {len(lowercase_ )}""" )
for accum_gradient, gradient in zip(self._gradients , lowercase_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowercase_ )
self._accum_steps.assign_add(1 )
def _snake_case ( self ) -> Union[str, Any]:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowercase_ ) )
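# Editorial sketch (added for illustration): the WarmUp schedule above scales
# the learning rate by (step / warmup_steps) ** power before handing off to a
# decay schedule. The same shape without TensorFlow, for a quick sanity check:
def warmup_then_linear_decay(step, init_lr=1e-3, warmup_steps=100, total_steps=1000):
    if step < warmup_steps:
        return init_lr * (step / warmup_steps)  # power = 1.0 warmup
    # Linear decay from init_lr down to 0 over the remaining steps.
    remaining = total_steps - warmup_steps
    return init_lr * max(0.0, (total_steps - step) / remaining)

for s in (0, 50, 100, 550, 1000):
    print(s, round(warmup_then_linear_decay(s), 6))  # rises to 1e-3, then decays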
| 719 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase : Dict = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
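# Editorial sketch (added for illustration): for p = 5 the Lucas-Lehmer
# sequence is s0 = 4, s_{k+1} = (s_k**2 - 2) mod M_p with M_5 = 2**5 - 1 = 31:
#   4 -> (16 - 2) % 31 = 14 -> (196 - 2) % 31 = 8 -> (64 - 2) % 31 = 0
# A final 0 certifies that 2**5 - 1 is a Mersenne prime. The same trace:
p = 5
m = (1 << p) - 1
s = 4
for _ in range(p - 2):
    s = (s * s - 2) % m
    print(s)   # 14, 8, 0
print(s == 0)  # True: 31 is prime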
| 693 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 720 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 693 | 0 |
from __future__ import annotations
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = 0.00
lowerCAmelCase : Optional[int] = 0
for resistor in resistors:
if resistor <= 0:
lowerCAmelCase : List[Any] = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
first_sum += 1 / float(SCREAMING_SNAKE_CASE__ )
index += 1
return 1 / first_sum
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = 0.00
lowerCAmelCase : Union[str, Any] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowerCAmelCase : Dict = F"""Resistor at index {index} has a negative value!"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
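# Editorial sketch (added for illustration): two resistors of 3 ohm and 6 ohm
# give 1 / (1/3 + 1/6) = 2 ohm in parallel and 3 + 6 = 9 ohm in series, which
# is exactly what the two helpers above compute:
resistors = [3.0, 6.0]
parallel = 1 / sum(1 / r for r in resistors)
series = sum(resistors)
print(parallel, series)  # 2.0 9.0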
| 721 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "llama"
_UpperCamelCase: List[str] = ["past_key_values"]
def __init__( self , lowercase_=32000 , lowercase_=4096 , lowercase_=11008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.0_2 , lowercase_=1e-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Any = num_key_value_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : int = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def _snake_case ( self ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("""type""" , lowercase_ )
lowerCAmelCase : Dict = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 693 | 0 |
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase : int ={
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Optional[Any] = "align_text_model"
def __init__( self , lowercase_=30522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=1e-12 , lowercase_=0 , lowercase_="absolute" , lowercase_=True , **lowercase_ , ) -> Any:
super().__init__(**lowercase_ )
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Tuple = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Optional[int] = initializer_range
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : Optional[int] = position_embedding_type
lowerCAmelCase : int = use_cache
lowerCAmelCase : Tuple = pad_token_id
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
lowerCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
lowerCAmelCase : Dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: Dict = "align_vision_model"
def __init__( self , lowercase_ = 3 , lowercase_ = 600 , lowercase_ = 2.0 , lowercase_ = 3.1 , lowercase_ = 8 , lowercase_ = [3, 3, 5, 3, 5, 5, 3] , lowercase_ = [32, 16, 24, 40, 80, 112, 192] , lowercase_ = [16, 24, 40, 80, 112, 192, 320] , lowercase_ = [] , lowercase_ = [1, 2, 2, 2, 1, 2, 1] , lowercase_ = [1, 2, 2, 3, 3, 4, 1] , lowercase_ = [1, 6, 6, 6, 6, 6, 6] , lowercase_ = 0.2_5 , lowercase_ = "swish" , lowercase_ = 2560 , lowercase_ = "mean" , lowercase_ = 0.0_2 , lowercase_ = 0.0_0_1 , lowercase_ = 0.9_9 , lowercase_ = 0.2 , **lowercase_ , ) -> List[Any]:
super().__init__(**lowercase_ )
lowerCAmelCase : List[Any] = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = width_coefficient
lowerCAmelCase : List[str] = depth_coefficient
lowerCAmelCase : Optional[Any] = depth_divisor
lowerCAmelCase : Dict = kernel_sizes
lowerCAmelCase : Union[str, Any] = in_channels
lowerCAmelCase : Optional[Any] = out_channels
lowerCAmelCase : Optional[Any] = depthwise_padding
lowerCAmelCase : Union[str, Any] = strides
lowerCAmelCase : Union[str, Any] = num_block_repeats
lowerCAmelCase : Optional[Any] = expand_ratios
lowerCAmelCase : List[str] = squeeze_expansion_ratio
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dim
lowerCAmelCase : Any = pooling_type
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : List[str] = batch_norm_eps
lowerCAmelCase : Optional[int] = batch_norm_momentum
lowerCAmelCase : Tuple = drop_connect_rate
lowerCAmelCase : Optional[Any] = sum(lowercase_ ) * 4
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
lowerCAmelCase : List[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
lowerCAmelCase : Union[str, Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class _a ( snake_case_ ):
_UpperCamelCase: Any = "align"
_UpperCamelCase: Any = True
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=640 , lowercase_=1.0 , lowercase_=0.0_2 , **lowercase_ , ) -> List[str]:
super().__init__(**lowercase_ )
if text_config is None:
lowerCAmelCase : Optional[Any] = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
lowerCAmelCase : List[Any] = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
lowerCAmelCase : Any = AlignTextConfig(**lowercase_ )
lowerCAmelCase : Dict = AlignVisionConfig(**lowercase_ )
lowerCAmelCase : Any = projection_dim
lowerCAmelCase : int = temperature_init_value
lowerCAmelCase : Optional[Any] = initializer_range
@classmethod
def _snake_case ( cls , lowercase_ , lowercase_ , **lowercase_ ) -> Optional[int]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def _snake_case ( self ) -> str:
lowerCAmelCase : Dict = copy.deepcopy(self.__dict__ )
lowerCAmelCase : List[str] = self.text_config.to_dict()
lowerCAmelCase : List[Any] = self.vision_config.to_dict()
lowerCAmelCase : Any = self.__class__.model_type
return output
| 700 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _a ( snake_case_ , snake_case_ ):
_UpperCamelCase: int = "swin"
_UpperCamelCase: str = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=32 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(**lowercase_ )
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : List[Any] = embed_dim
lowerCAmelCase : str = depths
lowerCAmelCase : List[str] = len(lowercase_ )
lowerCAmelCase : Any = num_heads
lowerCAmelCase : str = window_size
lowerCAmelCase : List[str] = mlp_ratio
lowerCAmelCase : List[Any] = qkv_bias
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = drop_path_rate
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = use_absolute_embeddings
lowerCAmelCase : Dict = layer_norm_eps
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Any = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase : Dict = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
class _a ( snake_case_ ):
_UpperCamelCase: int = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-4
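# Editorial sketch (added for illustration): with the defaults above
# (embed_dim = 96 and depths = [2, 2, 6, 2], i.e. four stages), the channel
# width after the last stage is 96 * 2 ** (4 - 1) = 768, which is what the
# hidden_size computation in __init__ encodes:
embed_dim = 96
depths = [2, 2, 6, 2]
print(int(embed_dim * 2 ** (len(depths) - 1)))  # 768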
| 693 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase : Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
lowerCAmelCase : str ={
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
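# Editorial sketch (added for illustration): each value above is a PEP 508
# requirement string; the package name is everything before the first
# comparison operator. Splitting one back apart with the standard library:
import re

spec = "tokenizers>=0.11.1,!=0.11.3,<0.14"
name = re.split(r"[<>=!~]", spec, maxsplit=1)[0]
print(name, spec[len(name):])  # tokenizers >=0.11.1,!=0.11.3,<0.14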
| 693 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] ='%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
lowerCAmelCase : Optional[int] =F'''https://www.google.com/search?q={query}&num=100'''
lowerCAmelCase : Dict =requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
lowerCAmelCase : Optional[int] =(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
lowerCAmelCase : str =parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] ={
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] =[
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int =[
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 693 | 0 |
from math import factorial
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) or not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
lowerCAmelCase : List[Any] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
lowerCAmelCase : int = float(factorial(SCREAMING_SNAKE_CASE__ ) )
coefficient /= factorial(SCREAMING_SNAKE_CASE__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.7_5))
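# Editorial sketch (added for illustration): the printed case works out by
# hand as C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375:
from math import comb

print(comb(4, 2) * 0.75**2 * 0.25**2)  # 0.2109375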
| 703 |
def _UpperCAmelCase ( input_a ,input_b ):
'''simple docstring'''
return int(input_a == input_b == 0 )
def _UpperCAmelCase ( ):
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"""| 0 | 0 | {nor_gate(0 ,0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 ,1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 ,0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 ,1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
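# Editorial sketch (added for illustration): NOR is the negation of OR, so
# the gate must agree with `not (a or b)` on all four input pairs:
for a in (0, 1):
    for b in (0, 1):
        assert int(a == b == 0) == int(not (a or b))
print("NOR matches not-OR on all inputs")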
| 693 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
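# Editorial sketch (added for illustration): a number is perfect when it
# equals the sum of its proper divisors, which is what the helper above
# computes. For 28: 1 + 2 + 4 + 7 + 14 = 28.
n = 28
print(sum(d for d in range(1, n // 2 + 1) if n % d == 0) == n)  # True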
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : int ={
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =['PoolFormerFeatureExtractor']
lowerCAmelCase : List[str] =['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple =[
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 693 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = emb.weight.shape
lowerCAmelCase : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = emb.weight.data
return lin_layer
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = torch.load(SCREAMING_SNAKE_CASE__ ,map_location="""cpu""" )
lowerCAmelCase : Optional[Any] = Namespace(**checkpoint["""cfg"""]["""model"""] )
lowerCAmelCase : List[str] = checkpoint["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = state_dict["""decoder.embed_tokens.weight"""].shape[0]
lowerCAmelCase : List[str] = {key.replace("""decoder""" ,"""model""" ): val for key, val in state_dict.items()}
lowerCAmelCase : Optional[int] = XGLMConfig(
vocab_size=SCREAMING_SNAKE_CASE__ ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""gelu""" ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,)
lowerCAmelCase : str = XGLMForCausalLM(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = model.load_state_dict(SCREAMING_SNAKE_CASE__ ,strict=SCREAMING_SNAKE_CASE__ )
print(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : str = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase : Optional[int] =parser.parse_args()
lowerCAmelCase : Union[str, Any] =convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
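# Editorial sketch (added for illustration, assuming torch is installed): the
# make_linear_from_emb helper above reuses the input embedding matrix as the
# output projection. The same weight sharing in isolation:
from torch import nn

emb = nn.Embedding(10, 4)               # vocab_size=10, d_model=4
lm_head = nn.Linear(4, 10, bias=False)  # weight shape (10, 4) matches emb
lm_head.weight.data = emb.weight.data   # share one storage, no copy
print(lm_head.weight.data_ptr() == emb.weight.data_ptr())  # True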
| 705 |
import os
import string
import sys
lowerCAmelCase : Optional[int] =1 << 8
lowerCAmelCase : List[Any] ={
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
lowerCAmelCase : Optional[Any] =KEYMAP['up']
lowerCAmelCase : Tuple =KEYMAP['left']
if sys.platform == "win32":
lowerCAmelCase : Dict =[]
lowerCAmelCase : int ={
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCAmelCase : Optional[Any] =ord(str(i))
def _UpperCAmelCase ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
lowerCAmelCase : Any = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(SCREAMING_SNAKE_CASE__ ) == 0:
# Read the keystroke
lowerCAmelCase : int = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase : Tuple = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase : str = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE__ )
if ord(SCREAMING_SNAKE_CASE__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
lowerCAmelCase : Optional[Any] = chr(KEYMAP["""esc"""] )
except KeyError:
lowerCAmelCase : Optional[int] = cha[1]
else:
lowerCAmelCase : Any = ch.decode(SCREAMING_SNAKE_CASE__ )
else:
lowerCAmelCase : Optional[int] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase : List[Any] = sys.stdin.fileno()
lowerCAmelCase : str = termios.tcgetattr(SCREAMING_SNAKE_CASE__ )
try:
tty.setraw(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(SCREAMING_SNAKE_CASE__ ,termios.TCSADRAIN ,SCREAMING_SNAKE_CASE__ )
return ch
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Any = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(SCREAMING_SNAKE_CASE__ ) == KEYMAP["esc"]:
lowerCAmelCase : int = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) == KEYMAP["mod_int"]:
lowerCAmelCase : Tuple = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(SCREAMING_SNAKE_CASE__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 693 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase : List[Any] ='\\n Text data.\n Second line of data.'
lowerCAmelCase : Union[str, Any] ='file'
@pytest.fixture(scope="""session""" )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
lowerCAmelCase : str = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )
with zstd.open(SCREAMING_SNAKE_CASE__ , """wb""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return path
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE__ ) , """w""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
lowerCAmelCase : Optional[int] = input_paths[compression_format]
lowerCAmelCase : Optional[Any] = tmp_path / """cache"""
lowerCAmelCase : List[str] = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE__ , extract_compressed_file=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Tuple = cached_path(SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
with open(SCREAMING_SNAKE_CASE__ ) as f:
lowerCAmelCase : Optional[int] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = """custom_cache"""
lowerCAmelCase : Optional[int] = """custom_extracted_dir"""
lowerCAmelCase : str = tmp_path / """custom_extracted_path"""
if default_extracted:
lowerCAmelCase : int = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , SCREAMING_SNAKE_CASE__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase : Optional[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowerCAmelCase : Tuple = xz_file
lowerCAmelCase : Optional[int] = (
DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE__ )
)
lowerCAmelCase : Dict = cached_path(SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ )
assert Path(SCREAMING_SNAKE_CASE__ ).parent.parts[-2:] == expected
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = str(Path(SCREAMING_SNAKE_CASE__ ).resolve() )
assert cached_path(SCREAMING_SNAKE_CASE__ ) == text_file
# relative path
lowerCAmelCase : str = str(Path(SCREAMING_SNAKE_CASE__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(SCREAMING_SNAKE_CASE__ ) == text_file
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
cached_path(SCREAMING_SNAKE_CASE__ )
# relative path
lowerCAmelCase : List[Any] = """./__missing_file__.txt"""
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
cached_path(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(SCREAMING_SNAKE_CASE__ ) as f:
lowerCAmelCase : int = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( ):
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_get("""https://huggingface.co""" , temp_file=SCREAMING_SNAKE_CASE__ )
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=SCREAMING_SNAKE_CASE__ )
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=SCREAMING_SNAKE_CASE__ )
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
fsspec_head("""s3://huggingface.co""" )
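# Illustrative note (summarised from the tests above, not part of the suite): with
# DownloadConfig(extract_compressed_file=True), cached_path() returns the path of the
# *extracted* file; by default it lives under an "extracted" folder inside the downloads
# cache, which is why the test checks `parent.parts[-2:] == ("downloads", "extracted")`.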
| 706 |
# Imports
import numpy as np
class _a :
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> List[Any]:
        self.set_matrices(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
def _snake_case ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Union[str, Any]:
if red is not None:
lowerCAmelCase : str = red
if green is not None:
lowerCAmelCase : Optional[int] = green
if blue is not None:
lowerCAmelCase : Optional[int] = blue
if red_edge is not None:
lowerCAmelCase : Tuple = red_edge
if nir is not None:
lowerCAmelCase : Union[str, Any] = nir
return True
def _snake_case ( self , lowercase_="" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None ) -> Optional[int]:
        self.set_matrices(red=lowercase_ , green=lowercase_ , blue=lowercase_ , red_edge=lowercase_ , nir=lowercase_ )
lowerCAmelCase : int = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self ) -> Dict:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self ) -> Optional[Any]:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self ) -> List[str]:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self ) -> Tuple:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self ) -> Optional[int]:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self ) -> List[str]:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self ) -> int:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self ) -> Optional[Any]:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self ) -> Tuple:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self ) -> int:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self ) -> List[str]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , lowercase_=0.0_8 , lowercase_=1.2_2 , lowercase_=0.0_3 ) -> int:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self ) -> Optional[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self ) -> Any:
return (self.nir / self.green) - 1
def _snake_case ( self ) -> List[Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self ) -> str:
return (self.red - self.blue) / self.red
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self ) -> Optional[Any]:
return self.nir - self.green
def _snake_case ( self ) -> int:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Tuple = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , lowercase_=0.1_6 ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , lowercase_=0.5 ) -> List[str]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self ) -> str:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self ) -> Union[str, Any]:
return self.nir / self.red
def _snake_case ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self ) -> Dict:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self ) -> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self ) -> int:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self ) -> Dict:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self ) -> Optional[int]:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : Any = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self ) -> int:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self ) -> List[str]:
return self.nir / self.red
def _snake_case ( self ) -> int:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
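# Minimal NDVI sketch (hypothetical values; the method names above are auto-generated
# placeholders, so this illustrates the formula rather than calling the class):
#
#   nir = np.array([[0.8, 0.7]]); red = np.array([[0.2, 0.3]])
#   ndvi = (nir - red) / (nir + red)   # -> array([[0.6, 0.4]])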
| 693 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : str ={
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "conditional_detr"
_UpperCamelCase: Union[str, Any] = ["past_key_values"]
_UpperCamelCase: Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=300 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=2 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=2 , lowercase_=5 , lowercase_=2 , lowercase_=0.2_5 , **lowercase_ , ) -> Union[str, Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : List[str] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : str = backbone_config.get("""model_type""" )
lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : List[str] = config_class.from_dict(lowercase_ )
lowerCAmelCase : Optional[Any] = use_timm_backbone
lowerCAmelCase : Any = backbone_config
lowerCAmelCase : List[Any] = num_channels
lowerCAmelCase : Tuple = num_queries
lowerCAmelCase : str = d_model
lowerCAmelCase : Dict = encoder_ffn_dim
lowerCAmelCase : Union[str, Any] = encoder_layers
lowerCAmelCase : int = encoder_attention_heads
lowerCAmelCase : int = decoder_ffn_dim
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Optional[int] = decoder_attention_heads
lowerCAmelCase : Union[str, Any] = dropout
lowerCAmelCase : Tuple = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : Optional[Any] = activation_function
lowerCAmelCase : str = init_std
lowerCAmelCase : List[str] = init_xavier_std
lowerCAmelCase : Optional[int] = encoder_layerdrop
lowerCAmelCase : Tuple = decoder_layerdrop
lowerCAmelCase : str = encoder_layers
lowerCAmelCase : Dict = auxiliary_loss
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : List[str] = backbone
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : Tuple = dilation
# Hungarian matcher
lowerCAmelCase : int = class_cost
lowerCAmelCase : str = bbox_cost
lowerCAmelCase : str = giou_cost
# Loss coefficients
lowerCAmelCase : List[str] = mask_loss_coefficient
lowerCAmelCase : Optional[Any] = dice_loss_coefficient
lowerCAmelCase : int = cls_loss_coefficient
lowerCAmelCase : List[str] = bbox_loss_coefficient
lowerCAmelCase : Union[str, Any] = giou_loss_coefficient
lowerCAmelCase : Tuple = focal_alpha
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
def _snake_case ( self ) -> Any:
lowerCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
lowerCAmelCase : Any = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Dict = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
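# Usage sketch (assumes the standard PretrainedConfig behaviour; the class name is an
# auto-generated placeholder for the conditional-DETR config):
#
#   config = _a()   # defaults: num_queries=300, d_model=256, 6 encoder / 6 decoder layers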
| 707 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[str] = None
if token is not None:
lowerCAmelCase : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCAmelCase : Any = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase : int = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = None
if token is not None:
lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = None
if token is not None:
lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    lowerCAmelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ,allow_redirects=False )  # don't follow the redirect, we need its Location header
    lowerCAmelCase : Union[str, Any] = result.headers["""Location"""]
    lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,allow_redirects=True )  # download from the redirect target
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[int] = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
lowerCAmelCase : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase : str = line[: line.index(""": """ )]
lowerCAmelCase : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCAmelCase : Union[str, Any] = line[len("""FAILED """ ) :]
failed_tests.append(SCREAMING_SNAKE_CASE__ )
elif filename == "job_name.txt":
lowerCAmelCase : Union[str, Any] = line
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` """
F"""and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCAmelCase : Optional[int] = None
if job_name and job_links:
lowerCAmelCase : Optional[int] = job_links.get(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )]
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : Union[str, Any] = [os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__ ,job_links=SCREAMING_SNAKE_CASE__ ) )
return errors
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase : List[str] = counter.most_common()
lowerCAmelCase : Union[str, Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    lowerCAmelCase : int = dict(sorted(r.items() ,key=lambda item : item[1]["count"] ,reverse=True ) )  # most frequent errors first
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase : str = test.split("""/""" )[2]
else:
lowerCAmelCase : List[Any] = None
return test
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase : int = [x for x in logs if x[2] is not None]
lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
lowerCAmelCase : Dict = {}
for test in tests:
lowerCAmelCase : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase : Tuple = counter.most_common()
lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
    lowerCAmelCase : Any = dict(sorted(r.items() ,key=lambda item : item[1]["count"] ,reverse=True ) )  # models with the most errors first
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """| no. | error | status |"""
lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
lowerCAmelCase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
lowerCAmelCase : List[str] = reduced_by_error[error]["""count"""]
lowerCAmelCase : Any = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
lowerCAmelCase : Any = reduced_by_model[model]["""count"""]
lowerCAmelCase , lowerCAmelCase : Optional[int] = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase : Optional[Any] = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
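# Files this script leaves in --output_dir (read off the code above): job_links.json,
# artifacts.json, one <artifact>.zip per CI artifact, errors.json, plus
# reduced_by_error.txt and reduced_by_model.txt containing the two markdown tables.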
| 693 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Tuple ='▁'
lowerCAmelCase : str ={'vocab_file': 'spiece.model'}
lowerCAmelCase : Tuple ={
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
lowerCAmelCase : Union[str, Any] ={
'google/pegasus-xsum': 512,
}
lowerCAmelCase : Any =logging.get_logger(__name__)
class _a ( snake_case_ ):
_UpperCamelCase: Union[str, Any] = VOCAB_FILES_NAMES
_UpperCamelCase: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase: Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self , lowercase_ , lowercase_="<pad>" , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<mask_2>" , lowercase_="<mask_1>" , lowercase_=None , lowercase_=103 , lowercase_ = None , **lowercase_ , ) -> None:
lowerCAmelCase : Any = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(lowercase_ )}, but is"""
f""" {type(lowercase_ )}""" )
lowerCAmelCase : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowerCAmelCase : int = additional_special_tokens_extended
else:
lowerCAmelCase : int = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , pad_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
lowerCAmelCase : int = mask_token_sent
lowerCAmelCase : Tuple = vocab_file
lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
# add special tokens to encoder dict
lowerCAmelCase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowerCAmelCase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def _snake_case ( self ) -> int:
return len(self.sp_model ) + self.offset
def _snake_case ( self ) -> Dict[str, int]:
lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
lowerCAmelCase : List[str] = self.__dict__.copy()
lowerCAmelCase : Tuple = None
return state
def __setstate__( self , lowercase_ ) -> Dict:
lowerCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase : str = {}
lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , lowercase_ ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def _snake_case ( self , lowercase_ ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCAmelCase : Dict = self.sp_model.piece_to_id(lowercase_ )
return sp_id + self.offset
def _snake_case ( self , lowercase_ ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCAmelCase : Union[str, Any] = self.sp_model.IdToPiece(index - self.offset )
return token
def _snake_case ( self , lowercase_ ) -> Optional[int]:
lowerCAmelCase : List[Any] = []
lowerCAmelCase : List[str] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
lowerCAmelCase : Union[str, Any] = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def _snake_case ( self , lowercase_=False ) -> Optional[int]:
return 1
def _snake_case ( self , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self , lowercase_ , lowercase_ = None , lowercase_ = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self , lowercase_ , lowercase_=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase : int = os.path.join(
lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , """wb""" ) as fi:
lowerCAmelCase : int = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
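# Token-id layout implied by the constructor above (default offset=103):
#   0 -> <pad>, 1 -> </s>, 2 -> <mask_1> (mask_token_sent), 3 -> <mask_2> (mask_token),
#   4..104 -> <unk_2>..<unk_102>, and every raw sentencepiece id k maps to k + offset.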
| 708 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] ={
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict =[
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
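# Note: this is the usual transformers lazy-import idiom — the module object in
# sys.modules is swapped for a _LazyModule, so the torch-backed Autoformer classes are
# only imported the first time one of the listed attributes is accessed.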
| 693 | 0 |
'''simple docstring'''
import torch
from torch import nn
class _a ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=1 , lowercase_=False ) -> Dict:
super().__init__()
lowerCAmelCase : Optional[int] = n_token
lowerCAmelCase : Optional[int] = d_embed
lowerCAmelCase : str = d_proj
lowerCAmelCase : str = cutoffs + [n_token]
lowerCAmelCase : Any = [0] + self.cutoffs
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : Any = self.cutoffs[0]
lowerCAmelCase : Optional[int] = len(self.cutoffs ) - 1
lowerCAmelCase : Optional[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase : str = nn.ModuleList()
lowerCAmelCase : Optional[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase_ , lowercase_ ) ) )
else:
self.out_projs.append(lowercase_ )
self.out_layers.append(nn.Linear(lowercase_ , lowercase_ ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase_ , lowercase_ ) ) )
self.out_layers.append(nn.Linear(lowercase_ , r_idx - l_idx ) )
lowerCAmelCase : Any = keep_order
def _snake_case ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
if proj is None:
lowerCAmelCase : List[str] = nn.functional.linear(lowercase_ , lowercase_ , bias=lowercase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase : Optional[int] = nn.functional.linear(lowercase_ , proj.t().contiguous() )
lowerCAmelCase : Tuple = nn.functional.linear(lowercase_ , lowercase_ , bias=lowercase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _snake_case ( self , lowercase_ , lowercase_=None , lowercase_=False ) -> Any:
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase : List[Any] = hidden[..., :-1, :].contiguous()
lowerCAmelCase : int = labels[..., 1:].contiguous()
lowerCAmelCase : str = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase : List[Any] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
lowerCAmelCase : str = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase : List[str] = self._compute_logit(lowercase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase : Dict = labels != -100
lowerCAmelCase : Tuple = torch.zeros_like(lowercase_ , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase : Dict = (
-nn.functional.log_softmax(lowercase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase : Dict = nn.functional.log_softmax(lowercase_ , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : int = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase : Tuple = self.out_layers[i].weight
lowerCAmelCase : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowercase_ )
biases.append(lowercase_ )
lowerCAmelCase : Any = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase : Optional[Any] = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Tuple = nn.functional.log_softmax(lowercase_ , dim=1 )
if labels is None:
lowerCAmelCase : int = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase : List[str] = torch.zeros_like(lowercase_ , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase : int = 0
lowerCAmelCase : List[Any] = [0] + self.cutoffs
for i in range(len(lowercase_ ) - 1 ):
lowerCAmelCase : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase : List[str] = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase : List[str] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase : Dict = labels.index_select(0 , lowercase_ ) - l_idx
lowerCAmelCase : Union[str, Any] = head_logprob.index_select(0 , lowercase_ )
lowerCAmelCase : str = hidden.index_select(0 , lowercase_ )
else:
lowerCAmelCase : Optional[int] = hidden
if i == 0:
if labels is not None:
lowerCAmelCase : List[str] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase : List[str] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase : Tuple = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : Any = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Tuple = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase : str = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase : Dict = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowercase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _snake_case ( self , lowercase_ ) -> List[str]:
if self.n_clusters == 0:
lowerCAmelCase : Union[str, Any] = self._compute_logit(lowercase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowercase_ , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase : Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase : str = self.out_layers[i].weight
lowerCAmelCase : List[Any] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowercase_ )
biases.append(lowercase_ )
lowerCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase : List[str] = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase : List[Any] = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Optional[int] = [0] + self.cutoffs
for i in range(len(lowercase_ ) - 1 ):
lowerCAmelCase : Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase : Dict = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase : Dict = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase : Any = self._compute_logit(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase : List[Any] = nn.functional.log_softmax(lowercase_ , dim=1 )
lowerCAmelCase : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase : int = logprob_i
return out
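# Factorisation implemented above (adaptive softmax): a shortlist token's log-prob is
# read directly from the head, while a tail token t in cluster c uses
# log p(t) = log p(c | head) + log p(t | c) — hence head_size = shortlist_size + n_clusters.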
| 709 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _a ( snake_case_ ):
_UpperCamelCase: List[str] = "detr"
_UpperCamelCase: Dict = ["past_key_values"]
_UpperCamelCase: Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : Optional[int] = config_class.from_dict(lowercase_ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = None, None, None
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : int = backbone_config
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[str] = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : Dict = encoder_layers
lowerCAmelCase : str = encoder_attention_heads
lowerCAmelCase : List[Any] = decoder_ffn_dim
lowerCAmelCase : List[Any] = decoder_layers
lowerCAmelCase : Union[str, Any] = decoder_attention_heads
lowerCAmelCase : str = dropout
lowerCAmelCase : Dict = attention_dropout
lowerCAmelCase : Union[str, Any] = activation_dropout
lowerCAmelCase : str = activation_function
lowerCAmelCase : Optional[int] = init_std
lowerCAmelCase : Any = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Tuple = encoder_layers
lowerCAmelCase : Optional[int] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = use_pretrained_backbone
lowerCAmelCase : List[Any] = dilation
# Hungarian matcher
lowerCAmelCase : Tuple = class_cost
lowerCAmelCase : Union[str, Any] = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : List[Any] = mask_loss_coefficient
lowerCAmelCase : Optional[int] = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : Dict = giou_loss_coefficient
lowerCAmelCase : str = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> int:
return self.encoder_attention_heads
@property
def _snake_case ( self ) -> int:
return self.d_model
@classmethod
def _snake_case ( cls , lowercase_ , **lowercase_ ) -> Any:
return cls(backbone_config=lowercase_ , **lowercase_ )
def _snake_case ( self ) -> Dict[str, any]:
lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : List[str] = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
class _a ( snake_case_ ):
_UpperCamelCase: Any = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _snake_case ( self ) -> float:
return 1e-5
@property
def _snake_case ( self ) -> int:
return 12
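# ONNX export contract captured by the OnnxConfig above: inputs are pixel_values
# (batch, num_channels, height, width) and pixel_mask (batch,), validated with
# atol 1e-5 and a default opset of 12.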
| 693 | 0 |
from maths.prime_check import is_prime
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : str = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE__ )
if is_prime(SCREAMING_SNAKE_CASE__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
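# Examples (assuming the usual is_prime semantics): 5 -> 7, since 5 and 7 are twin
# primes, while 6 -> -1 because 6 itself is not prime.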
| 710 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : int =logging.getLogger()
lowerCAmelCase : str =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( snake_case_ ):
def _snake_case ( self , lowercase_ ) -> List[Any]:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowerCAmelCase : int = {"""source""": """What is love ?""", """target""": """life"""}
lowerCAmelCase : Optional[Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCAmelCase : Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowercase_ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowercase_ )
def _snake_case ( self , lowercase_ , lowercase_ = "pytorch" ) -> str:
lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """output""" )
lowerCAmelCase : Dict = os.path.join(lowercase_ , """data""" )
self._create_dummy_data(data_dir=lowercase_ )
lowerCAmelCase : str = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
lowerCAmelCase : Optional[int] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env() )
lowerCAmelCase : Union[str, Any] = os.path.join(lowercase_ , """metrics.json""" )
with open(lowercase_ ) as f:
lowerCAmelCase : List[str] = json.load(lowercase_ )
return result
@require_torch_gpu
def _snake_case ( self ) -> Any:
lowerCAmelCase : Tuple = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : Dict = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _snake_case ( self ) -> int:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase : Optional[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
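# How this harness works (summarised from the code above): it writes a tiny dummy
# dataset, launches finetune_rag.py in a subprocess with the CLI flags shown, then
# reads metrics.json and asserts the test-set average exact match is at least 0.2.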
| 693 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase : Dict = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
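# Worked example for p = 5 (M_5 = 2**5 - 1 = 31), hand-checked:
#   s: 4 -> (4*4 - 2) % 31 = 14 -> (14*14 - 2) % 31 = 8 -> (8*8 - 2) % 31 = 0
# s reaching 0 after the p - 2 iterations certifies that M_5 is prime.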
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 693 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase : List[str] ='src/transformers'
# Matches is_xxx_available()
lowerCAmelCase : Dict =re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase : str =re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase : int =re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
lowerCAmelCase : int =re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase : Optional[Any] =re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase : Any =re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase : Optional[Any] =re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase : Optional[int] =re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase : Any =re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
lowerCAmelCase : List[str] =re.compile(r'^\s*try:')
# Catches a line with else:
lowerCAmelCase : int =re.compile(r'^\s*else:')
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if _re_test_backend.search(SCREAMING_SNAKE_CASE__ ) is None:
return None
lowerCAmelCase : List[str] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE__ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
lowerCAmelCase : Optional[Any] = f.readlines()
lowerCAmelCase : Optional[Any] = 0
while line_index < len(SCREAMING_SNAKE_CASE__ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE__ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase : Dict = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : Optional[Any] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE__ ).groups()[0]
            lowerCAmelCase : Optional[int] = re.findall(r"""\[([^\]]+)\]""" ,SCREAMING_SNAKE_CASE__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
lowerCAmelCase : str = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
lowerCAmelCase : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase : Any = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
lowerCAmelCase : Dict = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE__ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE__ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE__ ) is not None:
lowerCAmelCase : Union[str, Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE__ ).groups()[0].split(""", """ )
lowerCAmelCase : List[str] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE__ ) is not None:
lowerCAmelCase : str = _re_between_brackets.search(SCREAMING_SNAKE_CASE__ ).groups()[0].split(""", """ )
lowerCAmelCase : Union[str, Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE__ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE__ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 1_2 + """\"""" ):
objects.append(line[1_3:-3] )
line_index += 1
lowerCAmelCase : Dict = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase : List[str] = []
while (
line_index < len(SCREAMING_SNAKE_CASE__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
lowerCAmelCase : List[str] = lines[line_index]
lowerCAmelCase : Dict = _re_import.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase : List[Any] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE__ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase : int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
lowerCAmelCase : Union[str, Any] = lines[line_index]
lowerCAmelCase : Any = _re_import.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
lowerCAmelCase : Tuple = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def find_duplicates(SCREAMING_SNAKE_CASE__ ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase : Optional[Any] = []
for key in import_dict_objects.keys():
lowerCAmelCase : Optional[Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowerCAmelCase : Dict = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase : Optional[int] = """base imports""" if key == """none""" else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
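# Tiny usage sketch of the `collections.Counter`-based duplicate check that
# `find_duplicates` above relies on:
import collections

def find_duplicates_demo(items):
    return [k for k, v in collections.Counter(items).items() if v > 1]

print(find_duplicates_demo(["BertModel", "BertConfig", "BertModel"]))  # ['BertModel']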
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Any = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE__ ):
if "__init__.py" in files:
lowerCAmelCase : str = os.path.join(SCREAMING_SNAKE_CASE__ ,"""__init__.py""" )
lowerCAmelCase : Optional[int] = parse_init(SCREAMING_SNAKE_CASE__ )
if objects is not None:
lowerCAmelCase : Union[str, Any] = analyze_results(*SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
lowerCAmelCase : Optional[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("""\n""".join(SCREAMING_SNAKE_CASE__ ) )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError("""\n\n""".join(SCREAMING_SNAKE_CASE__ ) )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE__ ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(SCREAMING_SNAKE_CASE__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE__ ) / folder).glob("""*.py""" ) ) ) == 0:
continue
lowerCAmelCase : Dict = str((Path(SCREAMING_SNAKE_CASE__ ) / folder).relative_to(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase : Optional[int] = short_path.replace(os.path.sep ,""".""" )
submodules.append(SCREAMING_SNAKE_CASE__ )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase : Optional[int] = str((Path(SCREAMING_SNAKE_CASE__ ) / fname).relative_to(SCREAMING_SNAKE_CASE__ ) )
lowerCAmelCase : Tuple = short_path.replace(""".py""" ,"""""" ).replace(os.path.sep ,""".""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE__ )
return submodules
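# Sketch of the path-to-module-name conversion used above: path separators
# become dots for packages, and the ".py" suffix is stripped for modules.
import os
from pathlib import Path

print(str(Path("models") / "bert").replace(os.path.sep, "."))           # models.bert
print("modeling_bert.py".replace(".py", "").replace(os.path.sep, "."))  # modeling_bert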
lowerCAmelCase : List[Any] =[
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = importlib.util.spec_from_file_location(
"""transformers""" ,os.path.join(SCREAMING_SNAKE_CASE__ ,"""__init__.py""" ) ,submodule_search_locations=[PATH_TO_TRANSFORMERS] ,)
lowerCAmelCase : Union[str, Any] = spec.loader.load_module()
lowerCAmelCase : Optional[Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
lowerCAmelCase : int = """\n""".join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
F"""{list_of_modules}\n"""
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 712 |
import torch
from diffusers import DiffusionPipeline
class _a ( snake_case_ ):
def __init__( self , lowercase_ , lowercase_ ) -> int:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def __call__( self ) -> List[Any]:
lowerCAmelCase : Union[str, Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowerCAmelCase : Union[str, Any] = 1
lowerCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
lowerCAmelCase : str = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCAmelCase : Dict = scheduler_output - scheduler_output + torch.ones_like(lowercase_ )
return result
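# The return value above reduces to a tensor of ones: for any tensor `t`,
# `t - t + torch.ones_like(t)` is exactly all ones. Minimal demonstration:
demo_tensor = torch.randn(1, 3, 4, 4)
assert torch.equal(demo_tensor - demo_tensor + torch.ones_like(demo_tensor), torch.ones(1, 3, 4, 4))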
| 693 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCAmelCase : List[Any] =logging.get_logger(__name__)
class _a ( snake_case_ ):
def __init__( self , **lowercase_ ) -> Any:
requires_backends(self , ["""bs4"""] )
super().__init__(**lowercase_ )
def _snake_case ( self , lowercase_ ) -> Optional[int]:
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = []
lowerCAmelCase : Dict = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
lowerCAmelCase : Union[str, Any] = parent.find_all(child.name , recursive=lowercase_ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(lowercase_ ) else next(i for i, s in enumerate(lowercase_ , 1 ) if s is child ) )
lowerCAmelCase : List[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _snake_case ( self , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : str = BeautifulSoup(lowercase_ , """html.parser""" )
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = []
lowerCAmelCase : int = []
for element in html_code.descendants:
if type(lowercase_ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
lowerCAmelCase : Any = html.unescape(lowercase_ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(lowercase_ )
lowerCAmelCase : List[Any] = self.xpath_soup(lowercase_ )
stringaxtag_seq.append(lowercase_ )
stringaxsubs_seq.append(lowercase_ )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _snake_case ( self , lowercase_ , lowercase_ ) -> Any:
lowerCAmelCase : Tuple = """"""
for tagname, subs in zip(lowercase_ , lowercase_ ):
xpath += f"""/{tagname}"""
if subs != 0:
xpath += f"""[{subs}]"""
return xpath
def __call__( self , lowercase_ ) -> BatchFeature:
lowerCAmelCase : Dict = False
# Check that strings has a valid type
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = True
elif isinstance(lowercase_ , (list, tuple) ):
if len(lowercase_ ) == 0 or isinstance(html_strings[0] , lowercase_ ):
lowerCAmelCase : Union[str, Any] = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
f"""but is of type {type(lowercase_ )}.""" )
lowerCAmelCase : Dict = bool(isinstance(lowercase_ , (list, tuple) ) and (isinstance(html_strings[0] , lowercase_ )) )
if not is_batched:
lowerCAmelCase : List[str] = [html_strings]
# Get nodes + xpaths
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Dict = []
for html_string in html_strings:
lowerCAmelCase : Optional[Any] = self.get_three_from_single(lowercase_ )
nodes.append(lowercase_ )
lowerCAmelCase : List[Any] = []
for node, tag_list, sub_list in zip(lowercase_ , lowercase_ , lowercase_ ):
lowerCAmelCase : Union[str, Any] = self.construct_xpath(lowercase_ , lowercase_ )
xpath_strings.append(lowercase_ )
xpaths.append(lowercase_ )
# return as Dict
lowerCAmelCase : Union[str, Any] = {"""nodes""": nodes, """xpaths""": xpaths}
lowerCAmelCase : Optional[Any] = BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
return encoded_inputs
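# Worked example of the `construct_xpath` logic above: tag names paired with
# sibling subscripts, where a subscript of 0 means no index is emitted.
def construct_xpath_demo(xpath_tags, xpath_subscripts):
    xpath = ""
    for tagname, subs in zip(xpath_tags, xpath_subscripts):
        xpath += f"/{tagname}"
        if subs != 0:
            xpath += f"[{subs}]"
    return xpath

print(construct_xpath_demo(["html", "body", "div"], [0, 0, 2]))  # /html/body/div[2]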
| 713 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
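# A minimal sketch of one way such an offline simulation can be implemented
# (the real `offline` helper in datasets' test utils is more elaborate; this
# just patches `requests.Session.request` so any HTTP call fails fast):
from contextlib import contextmanager
from unittest.mock import patch

@contextmanager
def offline_demo():
    def _raise(*args, **kwargs):
        raise requests.exceptions.ConnectionError("Offline mode is enabled.")

    with patch("requests.Session.request", _raise):
        yield

with offline_demo():
    try:
        requests.get("https://example.com")
    except requests.exceptions.ConnectionError as err:
        print(err)  # Offline mode is enabled.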
| 693 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCAmelCase : int =[
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
lowerCAmelCase : Dict =[
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
lowerCAmelCase : str =(
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
lowerCAmelCase : Dict =(
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
lowerCAmelCase : Optional[int] =[
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for tf_name, hf_name in patterns:
lowerCAmelCase : List[Any] = k.replace(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return k
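# Usage sketch of the ordered (tf_name, hf_name) substitutions above. The
# checkpoint key below is illustrative, not taken from a real checkpoint:
def rename_state_dict_key_demo(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

demo_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
print(rename_state_dict_key_demo("pegasus/decoder/layer_0/self/query/kernel", demo_patterns))
# model.decoder.layers.0.self.query.weight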
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = BigBirdPegasusConfig(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Dict = BigBirdPegasusForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[str] = torch_model.state_dict()
lowerCAmelCase : Tuple = {}
# separating decoder weights
lowerCAmelCase : Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
lowerCAmelCase : Tuple = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() ,"""tf -> hf conversion""" ):
lowerCAmelCase : Optional[int] = [k.endswith(SCREAMING_SNAKE_CASE__ ) for ending in KEYS_TO_IGNORE]
if any(SCREAMING_SNAKE_CASE__ ):
continue
lowerCAmelCase : Union[str, Any] = DECODER_PATTERNS
lowerCAmelCase : List[str] = rename_state_dict_key(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if new_k not in state_dict:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
lowerCAmelCase : str = v.T
lowerCAmelCase : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() ,"""tf -> hf conversion""" ):
lowerCAmelCase : List[str] = [k.endswith(SCREAMING_SNAKE_CASE__ ) for ending in KEYS_TO_IGNORE]
if any(SCREAMING_SNAKE_CASE__ ):
continue
lowerCAmelCase : Union[str, Any] = REMAINING_PATTERNS
lowerCAmelCase : int = rename_state_dict_key(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
lowerCAmelCase : Tuple = v.T
lowerCAmelCase : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
lowerCAmelCase : Dict = mapping["""model.embed_positions.weight"""]
lowerCAmelCase : List[Any] = mapping.pop("""model.embed_positions.weight""" )
lowerCAmelCase : Dict = torch_model.load_state_dict(SCREAMING_SNAKE_CASE__ ,strict=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[str] = {}
lowerCAmelCase : Optional[int] = ["""global_step"""]
for name, shape in tqdm(SCREAMING_SNAKE_CASE__ ,desc="""converting tf checkpoint to dict""" ):
lowerCAmelCase : int = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCAmelCase : List[str] = tf.train.load_variable(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = array
return tf_weights
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Any = get_tf_weights_as_numpy(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = convert_bigbird_pegasus(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
torch_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase : List[Any] =parser.parse_args()
lowerCAmelCase : Optional[int] ={}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 714 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _a ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=1 / 255 , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_resolution
lowerCAmelCase : Optional[Any] = max_resolution
lowerCAmelCase : Optional[int] = do_resize
lowerCAmelCase : List[str] = size
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Union[str, Any] = rescale_factor
lowerCAmelCase : int = do_normalize
lowerCAmelCase : Union[str, Any] = image_mean
lowerCAmelCase : Dict = image_std
lowerCAmelCase : Optional[int] = do_pad
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _snake_case ( self , lowercase_ , lowercase_=False ) -> List[Any]:
if not batched:
lowerCAmelCase : Tuple = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : Dict = image.size
else:
lowerCAmelCase , lowerCAmelCase : Tuple = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase : Optional[Any] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase : List[Any] = self.size["""shortest_edge"""]
lowerCAmelCase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase : Optional[int] = self.size["""shortest_edge"""]
lowerCAmelCase : List[str] = self.size["""shortest_edge"""]
else:
lowerCAmelCase : Optional[int] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase : Any = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = DetrImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : List[str] = DetrImageProcessingTester(self )
@property
def _snake_case ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowercase_ , """image_std""" ) )
self.assertTrue(hasattr(lowercase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowercase_ , """do_rescale""" ) )
self.assertTrue(hasattr(lowercase_ , """rescale_factor""" ) )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_pad""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _snake_case ( self ) -> List[Any]:
pass
def _snake_case ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _snake_case ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : str = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : str = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _snake_case ( self ) -> int:
# prepare image and target
lowerCAmelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : str = json.loads(f.read() )
lowerCAmelCase : List[Any] = {"""image_id""": 39769, """annotations""": target}
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
lowerCAmelCase : List[str] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Dict = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify orig_size
lowerCAmelCase : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
@slow
def _snake_case ( self ) -> int:
# prepare image, target and masks_path
lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase : Any = json.loads(f.read() )
lowerCAmelCase : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
lowerCAmelCase : List[str] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase : Any = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
lowerCAmelCase : Tuple = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowercase_ )
lowerCAmelCase : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowercase_ ) )
# verify boxes
lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : Tuple = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowercase_ ) )
# verify class_labels
lowerCAmelCase : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowercase_ ) )
# verify masks
lowerCAmelCase : Union[str, Any] = 822873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowercase_ ) )
# verify size
lowerCAmelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowercase_ ) )
| 693 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase : Any =logging.getLogger(__name__)
lowerCAmelCase : str =list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase : Optional[int] =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _a :
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(snake_case_ )} , )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class _a :
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "The input training data file (a text file)."} )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
_UpperCamelCase: Optional[str] = field(
default=snake_case_ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
_UpperCamelCase: bool = field(default=snake_case_ , metadata={"help": "Whether ot not to use whole word mask."} )
_UpperCamelCase: float = field(
default=0.1_5 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
_UpperCamelCase: float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
_UpperCamelCase: int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
_UpperCamelCase: int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
_UpperCamelCase: bool = field(
default=snake_case_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,):
'''simple docstring'''
def _dataset(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=SCREAMING_SNAKE_CASE__ ,file_path=SCREAMING_SNAKE_CASE__ ,block_size=args.block_size ,ref_path=SCREAMING_SNAKE_CASE__ ,)
return LineByLineTextDataset(tokenizer=SCREAMING_SNAKE_CASE__ ,file_path=SCREAMING_SNAKE_CASE__ ,block_size=args.block_size )
else:
return TextDataset(
tokenizer=SCREAMING_SNAKE_CASE__ ,file_path=SCREAMING_SNAKE_CASE__ ,block_size=args.block_size ,overwrite_cache=args.overwrite_cache ,cache_dir=SCREAMING_SNAKE_CASE__ ,)
if evaluate:
return _dataset(args.eval_data_file ,args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(SCREAMING_SNAKE_CASE__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file ,args.train_ref_file )
def _UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,SCREAMING_SNAKE_CASE__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name ,cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase : Tuple = AutoConfig.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
else:
lowerCAmelCase : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
lowerCAmelCase : Optional[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=SCREAMING_SNAKE_CASE__ ,cache_dir=model_args.cache_dir ,)
else:
logger.info("""Training new model from scratch""" )
lowerCAmelCase : Any = AutoModelWithLMHead.from_config(SCREAMING_SNAKE_CASE__ )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
lowerCAmelCase : Union[str, Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowerCAmelCase : str = min(data_args.block_size ,tokenizer.max_len )
# Get datasets
lowerCAmelCase : List[str] = (
get_dataset(SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowerCAmelCase : Dict = (
get_dataset(SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,evaluate=SCREAMING_SNAKE_CASE__ ,cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowerCAmelCase : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE__ ,plm_probability=data_args.plm_probability ,max_span_length=data_args.max_span_length ,)
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCAmelCase : Tuple = DataCollatorForWholeWordMask(
tokenizer=SCREAMING_SNAKE_CASE__ ,mlm_probability=data_args.mlm_probability )
else:
lowerCAmelCase : Tuple = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE__ ,mlm=data_args.mlm ,mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase : Any = Trainer(
model=SCREAMING_SNAKE_CASE__ ,args=SCREAMING_SNAKE_CASE__ ,data_collator=SCREAMING_SNAKE_CASE__ ,train_dataset=SCREAMING_SNAKE_CASE__ ,eval_dataset=SCREAMING_SNAKE_CASE__ ,prediction_loss_only=SCREAMING_SNAKE_CASE__ ,)
# Training
if training_args.do_train:
lowerCAmelCase : List[Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase : int = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase : str = trainer.evaluate()
lowerCAmelCase : str = math.exp(eval_output["""eval_loss"""] )
lowerCAmelCase : List[str] = {"""perplexity""": perplexity}
lowerCAmelCase : List[str] = os.path.join(training_args.output_dir ,"""eval_results_lm.txt""" )
if trainer.is_world_master():
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" ,SCREAMING_SNAKE_CASE__ ,str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(SCREAMING_SNAKE_CASE__ )
return results
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
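# Worked example of the perplexity computation used above: perplexity is the
# exponential of the mean cross-entropy evaluation loss.
import math

demo_eval_loss = 2.0
print(round(math.exp(demo_eval_loss), 3))  # 7.389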
| 715 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
while b > 0:
if b & 1:
lowerCAmelCase : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
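# Quick self-checks for the shift-and-add ("Russian peasant") multiplication
# above: add `a` whenever the current low bit of `b` is set, then double `a`
# and shift `b` right.
def multiply_demo(a, b):
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res

assert multiply_demo(3, 4) == 12
assert multiply_demo(7, 0) == 0
assert multiply_demo(13, 13) == 169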
| 693 | 0 |
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 716 |
from math import factorial
class _a :
def __init__( self , lowercase_ , lowercase_ ) -> Optional[Any]:
lowerCAmelCase : Union[str, Any] = real
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = [1] * rank
else:
lowerCAmelCase : Any = rank
def __repr__( self ) -> int:
return (
f"""{self.real}+"""
f"""{'+'.join(str(lowercase_ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase : List[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowercase_ )
def __add__( self , lowercase_ ) -> Tuple:
if not isinstance(lowercase_ , lowercase_ ):
return Dual(self.real + other , self.duals )
lowerCAmelCase : int = self.duals.copy()
lowerCAmelCase : Tuple = other.duals.copy()
if len(lowercase_ ) > len(lowercase_ ):
o_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
elif len(lowercase_ ) < len(lowercase_ ):
s_dual.extend([1] * (len(lowercase_ ) - len(lowercase_ )) )
lowerCAmelCase : List[Any] = []
for i in range(len(lowercase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowercase_ )
_UpperCamelCase: List[Any] = __add__
def __sub__( self , lowercase_ ) -> Union[str, Any]:
return self + other * -1
def __mul__( self , lowercase_ ) -> Optional[int]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowercase_ )
lowerCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowercase_ )
_UpperCamelCase: str = __mul__
def __truediv__( self , lowercase_ ) -> Optional[Any]:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowercase_ )
raise ValueError
def __floordiv__( self , lowercase_ ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowercase_ )
raise ValueError
def __pow__( self , lowercase_ ) -> str:
if n < 0 or isinstance(lowercase_ , lowercase_ ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
lowerCAmelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not callable(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,(float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
lowerCAmelCase : List[Any] = Dual(SCREAMING_SNAKE_CASE__ ,1 )
lowerCAmelCase : Optional[Any] = func(SCREAMING_SNAKE_CASE__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
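# Hand check of the example above: f(y) = y**2 * y**4 = y**6, so the second
# derivative is 30 * y**4, and at y = 9 that gives 30 * 6561 = 196830.
print(30 * 9**4)  # 196830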
| 693 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[str] = None
if token is not None:
lowerCAmelCase : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[Any] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCAmelCase : Any = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase : int = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : List[str] = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = None
if token is not None:
lowerCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : Optional[int] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ).json()
lowerCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(SCREAMING_SNAKE_CASE__ ):
lowerCAmelCase : int = requests.get(url + F"""&page={i + 2}""" ,headers=SCREAMING_SNAKE_CASE__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Dict = None
if token is not None:
lowerCAmelCase : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowerCAmelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ,headers=SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = result.headers["""Location"""]
lowerCAmelCase : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ ,allow_redirects=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""{artifact_name}.zip""" )
with open(SCREAMING_SNAKE_CASE__ ,"""wb""" ) as fp:
fp.write(response.content )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Optional[int] = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE__ ) as f:
for line in f:
lowerCAmelCase : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase : str = line[: line.index(""": """ )]
lowerCAmelCase : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCAmelCase : Union[str, Any] = line[len("""FAILED """ ) :]
failed_tests.append(SCREAMING_SNAKE_CASE__ )
elif filename == "job_name.txt":
lowerCAmelCase : Union[str, Any] = line
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` """
F"""and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCAmelCase : Optional[int] = None
if job_name and job_links:
lowerCAmelCase : Optional[int] = job_links.get(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase : Union[str, Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )]
return result
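# Sketch of the "failures_line" split used above: everything before the first
# ": " is treated as the location, and the rest as the error message.
demo_line = "tests/test_foo.py:12: AssertionError: tensors not close"
demo_error_line = demo_line[: demo_line.index(": ")]
demo_error = demo_line[demo_line.index(": ") + len(": ") :]
print(demo_error_line)  # tests/test_foo.py:12
print(demo_error)       # AssertionError: tensors not close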
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : str = []
lowerCAmelCase : Union[str, Any] = [os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__ ,job_links=SCREAMING_SNAKE_CASE__ ) )
return errors
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : int = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase : List[str] = counter.most_common()
lowerCAmelCase : Union[str, Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCAmelCase : int = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase : str = test.split("""/""" )[2]
else:
lowerCAmelCase : List[Any] = None
return test
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase : int = [x for x in logs if x[2] is not None]
lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
lowerCAmelCase : Dict = {}
for test in tests:
lowerCAmelCase : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase : Tuple = counter.most_common()
lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
lowerCAmelCase : Any = dict(sorted(r.items() ,key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """| no. | error | status |"""
lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
lowerCAmelCase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
lowerCAmelCase : List[str] = reduced_by_error[error]["""count"""]
lowerCAmelCase : Any = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
lowerCAmelCase : Any = reduced_by_model[model]["""count"""]
lowerCAmelCase : Optional[int] = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase : Optional[Any] = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
lowerCAmelCase : Optional[Any] =make_github_table(reduced_by_error)
lowerCAmelCase : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 717 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_UpperCamelCase: List[Any] = ["keras_nlp"]
def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple:
requires_backends(self , ["""keras_nlp"""] )
| 693 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : Any ={
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _a ( snake_case_ ):
_UpperCamelCase: Union[str, Any] = "dpt"
def __init__( self , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0_2 , lowercase_=1e-12 , lowercase_=384 , lowercase_=16 , lowercase_=3 , lowercase_=False , lowercase_=True , lowercase_=[2, 5, 8, 11] , lowercase_="project" , lowercase_=[4, 2, 1, 0.5] , lowercase_=[96, 192, 384, 768] , lowercase_=256 , lowercase_=-1 , lowercase_=False , lowercase_=True , lowercase_=0.4 , lowercase_=255 , lowercase_=0.1 , lowercase_=[1, 1024, 24, 24] , lowercase_=[0, 1] , lowercase_=None , **lowercase_ , ) -> List[Any]:
super().__init__(**lowercase_ )
lowerCAmelCase : Any = hidden_size
lowerCAmelCase : Optional[Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
lowerCAmelCase : Tuple = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
lowerCAmelCase : Union[str, Any] = BitConfig(**lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
lowerCAmelCase : List[str] = BitConfig(**lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase : Tuple = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
lowerCAmelCase : Dict = backbone_featmap_shape
lowerCAmelCase : Optional[int] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Any = None
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Union[str, Any] = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Optional[int] = layer_norm_eps
lowerCAmelCase : List[Any] = image_size
lowerCAmelCase : List[Any] = patch_size
lowerCAmelCase : Any = num_channels
lowerCAmelCase : int = qkv_bias
lowerCAmelCase : Tuple = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
lowerCAmelCase : Any = readout_type
lowerCAmelCase : Optional[int] = reassemble_factors
lowerCAmelCase : List[Any] = neck_hidden_sizes
lowerCAmelCase : Optional[int] = fusion_hidden_size
lowerCAmelCase : int = head_in_index
lowerCAmelCase : List[str] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase : Tuple = use_auxiliary_head
lowerCAmelCase : str = auxiliary_loss_weight
lowerCAmelCase : Tuple = semantic_loss_ignore_index
lowerCAmelCase : Tuple = semantic_classifier_dropout
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase : Optional[Any] = self.backbone_config.to_dict()
lowerCAmelCase : Tuple = self.__class__.model_type
return output
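# A minimal usage sketch, assuming this config class is exported upstream as
# `DPTConfig`: instantiating the hybrid variant picks up the default BiT
# backbone defined in `__init__` above.
#
#     config = DPTConfig(is_hybrid=True)
#     config.backbone_config.layer_type     # -> "bottleneck"
#     config.to_dict()["backbone_config"]   # serialized by the override above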
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 693 | 0 |
# Note: if you intend to run this script, make sure to look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase : Optional[Any] =2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowerCAmelCase : List[Any] ={
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.1_5},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
lowerCAmelCase : List[Any] ={}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCAmelCase : Any ='facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCAmelCase : Dict ='allenai'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = dict((re.sub(r"""@@$""" ,"""""" ,SCREAMING_SNAKE_CASE__ ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" ,"""</w>""" ,SCREAMING_SNAKE_CASE__ ), v) for k, v in d.items() )
lowerCAmelCase : List[Any] = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
lowerCAmelCase : Union[str, Any] = d[k] # restore
return da
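# For example (hypothetical vocab), the rewrite maps
#     {"he@@": 5, "llo": 6, "<s>": 0}  ->  {"he": 5, "llo</w>": 6, "<s>": 0}
# i.e. BPE continuation pieces drop the trailing "@@" marker, word-final pieces
# gain the fairseq-style "</w>" suffix, and special tokens are restored verbatim.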
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ ,exist_ok=SCREAMING_SNAKE_CASE__ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
lowerCAmelCase : Optional[int] = basename(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = dirname(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Union[str, Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCAmelCase : List[Any] = cls.hub_models()
lowerCAmelCase : Union[str, Any] = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
lowerCAmelCase : Tuple = """."""
    # note: the model dump is old; fairseq has since upgraded its model format,
    # and loading now does a whole lot of rewrites and splits on the saved
    # weights, so we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""" )
lowerCAmelCase : Dict = hub_utils.from_pretrained(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,archive_map=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = vars(chkpt["""args"""]["""model"""] )
lowerCAmelCase : Dict = args["""source_lang"""]
lowerCAmelCase : List[Any] = args["""target_lang"""]
lowerCAmelCase : Tuple = dirname(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = basename(SCREAMING_SNAKE_CASE__ )
# dicts
lowerCAmelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""dict.{src_lang}.txt""" )
lowerCAmelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,F"""dict.{tgt_lang}.txt""" )
lowerCAmelCase : Dict = Dictionary.load(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = rewrite_dict_keys(src_dict.indices )
lowerCAmelCase : Any = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,"""vocab-src.json""" )
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ,ensure_ascii=SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ ) )
    # detect whether this is a do_lower_case situation: it is, unless the
    # source vocab contains at least one uppercase letter
lowerCAmelCase : Union[str, Any] = True
for k in src_vocab.keys():
if not k.islower():
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Union[str, Any] = Dictionary.load(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Optional[int] = rewrite_dict_keys(tgt_dict.indices )
lowerCAmelCase : Any = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ ,"""vocab-tgt.json""" )
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ,ensure_ascii=SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ ) )
# merges_file (bpecodes)
lowerCAmelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE__ ,VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCAmelCase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
break
with open(SCREAMING_SNAKE_CASE__ ,encoding="""utf-8""" ) as fin:
lowerCAmelCase : Optional[Any] = fin.read()
lowerCAmelCase : Any = re.sub(r""" \d+$""" ,"""""" ,SCREAMING_SNAKE_CASE__ ,0 ,re.M ) # remove frequency number
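    # e.g. a merges line "t h 6542" becomes "t h" (hypothetical entry)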
print(F"""Generating {merges_file}""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as fout:
fout.write(SCREAMING_SNAKE_CASE__ )
# model config
lowerCAmelCase : Any = os.path.join(SCREAMING_SNAKE_CASE__ ,"""config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args['tokenizer']}"""
lowerCAmelCase : Optional[int] = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
lowerCAmelCase : Optional[Any] = 5
lowerCAmelCase : Union[str, Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCAmelCase : Dict = best_score_hparams[model_dir]["""length_penalty"""]
else:
lowerCAmelCase : Tuple = 1.0
print(F"""Generating {fsmt_model_config_file}""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ,ensure_ascii=SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ ) )
# tokenizer config
lowerCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1_0_2_4,
"""do_lower_case""": do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""" )
with open(SCREAMING_SNAKE_CASE__ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ,ensure_ascii=SCREAMING_SNAKE_CASE__ ,indent=SCREAMING_SNAKE_CASE__ ) )
# model
lowerCAmelCase : List[Any] = chkpt["""models"""][0]
lowerCAmelCase : Union[str, Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCAmelCase : Optional[int] = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCAmelCase : Dict = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : int = FSMTConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase : List[Any] = FSMTForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
# check that it loads ok
model_new.load_state_dict(SCREAMING_SNAKE_CASE__ ,strict=SCREAMING_SNAKE_CASE__ )
# save
lowerCAmelCase : str = os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F"""cd {data_root}""" )
print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase : Dict =parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 719 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCAmelCase : List[Any] = 4
lowerCAmelCase : Tuple = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase : Dict = ((s * s) - 2) % m
return s == 0
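# Lucas-Lehmer: with s_0 = 4 and s_(k+1) = s_k**2 - 2 (mod M_p), the Mersenne
# number M_p = 2**p - 1 is prime iff s_(p-2) == 0 for an odd prime p. For p = 7
# the sequence mod 127 runs 4, 14, 67, 42, 111, 0, so 127 is prime; for p = 11
# it ends nonzero, and indeed 2047 = 23 * 89.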
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 693 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
requests.request("""GET""" ,"""https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" ,"""https://huggingface.co""" )
def _UpperCAmelCase ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
http_head("""https://huggingface.co""" )
| 720 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCamelCase: Optional[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
_UpperCamelCase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase: List[str] = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self ) -> int:
return self._get_superresolution_dummy_components()
def _snake_case ( self , lowercase_ , lowercase_=0 ) -> Optional[Any]:
if str(lowercase_ ).startswith("""mps""" ):
lowerCAmelCase : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self ) -> int:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case ( self ) -> Any:
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self ) -> Any:
self._test_save_load_local()
def _snake_case ( self ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 693 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _a ( unittest.TestCase ):
@property
def _snake_case ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _snake_case ( self ) -> str:
lowerCAmelCase : Any = self.dummy_uncond_unet
lowerCAmelCase : Union[str, Any] = ScoreSdeVeScheduler()
lowerCAmelCase : Tuple = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_ )
sde_ve.to(lowercase_ )
sde_ve.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase : Optional[Any] = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=lowercase_ ).images
lowerCAmelCase : Any = torch.manual_seed(0 )
lowerCAmelCase : Tuple = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=lowercase_ , return_dict=lowercase_ )[
0
]
lowerCAmelCase : str = image[0, -3:, -3:, -1]
lowerCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _a ( unittest.TestCase ):
def _snake_case ( self ) -> Dict:
lowerCAmelCase : int = """google/ncsnpp-church-256"""
lowerCAmelCase : int = UNetaDModel.from_pretrained(lowercase_ )
lowerCAmelCase : str = ScoreSdeVeScheduler.from_pretrained(lowercase_ )
lowerCAmelCase : Optional[Any] = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_ )
sde_ve.to(lowercase_ )
sde_ve.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase : List[str] = torch.manual_seed(0 )
lowerCAmelCase : Any = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=lowercase_ ).images
lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase : Union[str, Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 721 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "llama"
_UpperCamelCase: List[str] = ["past_key_values"]
def __init__( self , lowercase_=32000 , lowercase_=4096 , lowercase_=11008 , lowercase_=32 , lowercase_=32 , lowercase_=None , lowercase_="silu" , lowercase_=2048 , lowercase_=0.0_2 , lowercase_=1e-6 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=1 , lowercase_=False , lowercase_=None , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : int = hidden_size
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Any = num_key_value_heads
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : str = rms_norm_eps
lowerCAmelCase : int = pretraining_tp
lowerCAmelCase : int = use_cache
lowerCAmelCase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )
def _snake_case ( self ) -> Dict:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"""got {self.rope_scaling}""" )
lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("""type""" , lowercase_ )
lowerCAmelCase : Dict = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 693 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 694 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _A ( snake_case__ : str , snake_case__ : str ):
snake_case__ : Tuple = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
snake_case__ : List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
snake_case__ : str = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
snake_case__ : Any = os.path.join(get_home_dir() , '''models''' )
snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
snake_case__ : Optional[int] = nlp.model.BERTModel(
snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
snake_case__ : Any = original_bort._collect_params_with_prefix()
# Build our config 🤗
snake_case__ : Union[str, Any] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(snake_case__ ),
}
snake_case__ : Dict = BertConfig.from_dict(snake_case__ )
snake_case__ : Dict = BertForMaskedLM(snake_case__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNet arrays to PyTorch parameters
def to_torch(snake_case__ : str ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ):
snake_case__ : Union[str, Any] = hf_param.shape
snake_case__ : Any = to_torch(params[gluon_param] )
snake_case__ : Dict = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
snake_case__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
snake_case__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
snake_case__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
snake_case__ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
    # Inspired by the RoBERTa conversion script, we just zero them out (Bort does not use them)
snake_case__ : str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
snake_case__ : BertSelfAttention = layer.attention.self
snake_case__ : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
snake_case__ : Dict = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
snake_case__ : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
snake_case__ : int = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
snake_case__ : List[Any] = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
snake_case__ : List[Any] = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
snake_case__ : BertSelfOutput = layer.attention.output
snake_case__ : Optional[Any] = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
snake_case__ : List[str] = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
snake_case__ : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
snake_case__ : Any = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
snake_case__ : BertIntermediate = layer.intermediate
snake_case__ : int = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
snake_case__ : Optional[int] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
snake_case__ : BertOutput = layer.output
snake_case__ : Any = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
snake_case__ : Tuple = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
snake_case__ : Tuple = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
snake_case__ : Union[str, Any] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' )
snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids''']
# Get gluon output
snake_case__ : List[str] = mx.nd.array([input_ids] )
snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(snake_case__ )
snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ )
hf_bort_model.eval()
snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' )
snake_case__ : str = hf_bort_model(**snake_case__ )[0]
snake_case__ : str = output_gluon[0].asnumpy()
snake_case__ : str = output_hf[0].detach().numpy()
snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item()
snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
if success:
print('''✔️ Both model do output the same tensors''' )
else:
print('''❌ Both model do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 694 | 1 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
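# Sanity check of the quoting behaviour the assertion above relies on
# (module-level, stdlib-only; `quote` is imported at the top of this file):
assert quote("filename with blanks.csv") == "filename%20with%20blanks.csv"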
| 694 |
'''simple docstring'''
def _A ( snake_case__ : int = 4_00_00_00 ):
snake_case__ : int = []
snake_case__ ,snake_case__ : Union[str, Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(snake_case__ )
snake_case__ ,snake_case__ : Any = b, a + b
return sum(snake_case__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
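# The even Fibonacci terms are every third term and satisfy the recurrence
# E(k) = 4 * E(k-1) + E(k-2); a quick self-check of that identity:
_even_terms = [2, 8, 34, 144, 610]
assert all(_even_terms[i] == 4 * _even_terms[i - 1] + _even_terms[i - 2] for i in range(2, 5))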
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : list[list[Edge]] = [[] for _ in range(lowerCamelCase )]
snake_case__ : Any = size
def __getitem__( self , lowerCamelCase ) -> Iterator[Edge]:
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return self._size
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase , lowerCamelCase ) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> int | None:
"""simple docstring"""
snake_case__ : Union[str, Any] = deque([start_vertex] )
snake_case__ : list[int | None] = [None] * self.size
snake_case__ : List[str] = 0
while queue:
snake_case__ : List[str] = queue.popleft()
snake_case__ : List[Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
snake_case__ : str = current_distance + edge.weight
snake_case__ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase , lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
snake_case__ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
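# A minimal usage sketch of the 0-1 BFS above, assuming the upstream names
# `AdjacencyList`, `add_edge`, and `get_shortest_path` (weight-0 edges are
# expanded first via appendleft, so the result counts only weight-1 edges):
#
#     g = AdjacencyList(4)
#     g.add_edge(0, 1, 0)
#     g.add_edge(1, 2, 1)
#     g.get_shortest_path(0, 2)  # -> 1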
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : int = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_lowerCAmelCase : Optional[int] = {
"google/pegasus-xsum": 5_1_2,
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PegasusTokenizer
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
f''' {type(lowerCamelCase )}''' )
snake_case__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case__ : List[Any] = additional_special_tokens_extended
else:
snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Union[str, Any] = vocab_file
snake_case__ : List[Any] = False if not self.vocab_file else True
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : int = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
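# Usage sketch (network access and the upstream export name
# `PegasusTokenizerFast` assumed):
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("Summarize me.")["input_ids"]
#     ids[-1] == tok.eos_token_id   # sequences are closed with </s>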
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _A ( snake_case__ : int ):
snake_case__ : int = int(number**0.5 )
return number == sq * sq
def _A ( snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
snake_case__ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
snake_case__ : int = x_den * y_den * z_den
snake_case__ : int = gcd(snake_case__ , snake_case__ )
top //= hcf
bottom //= hcf
return top, bottom
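# e.g. for x = 1/2, y = 1/3, z = 1/6 the reduced sum is (1, 1), since
# (1*3*6 + 1*2*6 + 1*2*3) / (2*3*6) = 36/36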
def _A ( snake_case__ : int = 35 ):
snake_case__ : set = set()
snake_case__ : int
snake_case__ : Fraction = Fraction(0 )
snake_case__ : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
snake_case__ : List[str] = x_num * y_den + x_den * y_num
snake_case__ : Tuple = x_den * y_den
snake_case__ : Optional[Any] = gcd(snake_case__ , snake_case__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
snake_case__ : Optional[Any] = add_three(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
unique_s.add(snake_case__ )
# n=2
snake_case__ : List[Any] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
snake_case__ : List[Any] = x_den * x_den * y_den * y_den
if is_sq(snake_case__ ) and is_sq(snake_case__ ):
snake_case__ : Optional[int] = int(sqrt(snake_case__ ) )
snake_case__ : int = int(sqrt(snake_case__ ) )
snake_case__ : Optional[Any] = gcd(snake_case__ , snake_case__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
snake_case__ : Union[str, Any] = add_three(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
unique_s.add(snake_case__ )
# n=-1
snake_case__ : List[str] = x_num * y_num
snake_case__ : Optional[int] = x_den * y_num + x_num * y_den
snake_case__ : str = gcd(snake_case__ , snake_case__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
snake_case__ : Dict = add_three(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
unique_s.add(snake_case__ )
# n=2
snake_case__ : List[Any] = x_num * x_num * y_num * y_num
snake_case__ : str = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(snake_case__ ) and is_sq(snake_case__ ):
snake_case__ : List[str] = int(sqrt(snake_case__ ) )
snake_case__ : List[Any] = int(sqrt(snake_case__ ) )
snake_case__ : Optional[int] = gcd(snake_case__ , snake_case__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
snake_case__ : Union[str, Any] = add_three(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
unique_s.add(snake_case__ )
for num, den in unique_s:
total += Fraction(snake_case__ , snake_case__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = 1.0 if scale is None else scale
snake_case__ : Dict = 0.0 if loc is None else loc
super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] )
@property
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return self.variance.sqrt()
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = args_dim
snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] )
snake_case__ : Optional[int] = domain_map
def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]:
"""simple docstring"""
snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj]
return self.domain_map(*lowerCamelCase )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Tuple = function
def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.function(lowerCamelCase , *lowerCamelCase )
class snake_case :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
def __init__( self , lowerCamelCase = 1 ) -> None:
"""simple docstring"""
snake_case__ : Optional[Any] = dim
snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowerCamelCase )
else:
return Independent(self.distribution_class(*lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution:
"""simple docstring"""
snake_case__ : List[Any] = self._base_distribution(lowerCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim )
@property
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def lowercase__ ( self ) -> float:
"""simple docstring"""
return 0.0
def lowercase__ ( self , lowerCamelCase ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowercase__ ( self , *lowerCamelCase ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def lowercase__ ( lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0
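# squareplus maps the real line smoothly onto (0, inf), much like softplus but
# purely algebraic: e.g. squareplus(0) = (0 + sqrt(0 + 4)) / 2 = 1.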
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCAmelCase = StudentT
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"loc": 1, "scale": 1}
_lowerCAmelCase = Normal
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"total_count": 1, "logits": 1}
_lowerCAmelCase = NegativeBinomial
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowercase__ ( self , lowerCamelCase ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : str = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
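# A minimal usage sketch, assuming the upstream names `StudentTOutput`,
# `get_parameter_projection`, and `distribution`: project a decoder state of
# width 32 to (df, loc, scale) and score a scalar target.
#
#     output = StudentTOutput(dim=1)
#     proj = output.get_parameter_projection(in_features=32)
#     df, loc, scale = proj(decoder_hidden)
#     nll = -output.distribution((df, loc, scale)).log_prob(target)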
| 694 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        statements = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, statements, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
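    # Format note (inferred from the splitting logic above, not documented in this
    # file): `table_text` serializes a table as newline-separated rows with
    # '#'-separated cells, the first row being the header, e.g. "name#age\nalice#30\nbob#25".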
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)
_lowerCAmelCase : str = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_0_9_6,
"google/bigbird-roberta-large": 4_0_9_6,
"google/bigbird-base-trivia-itc": 4_0_9_6,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
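    # Resulting format (derived from the method above): a single sequence becomes
    # [CLS] A [SEP]; a pair of sequences becomes [CLS] A [SEP] B [SEP].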
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 694 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 694 | 1 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
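# Quick check against the graphs above (worked out by hand, not a stored test):
# bidirectional_dij("E", "F", graph_fwd, graph_bwd) should return 3, the cost of
# the path E -> G -> F (2 + 1), which beats E -> B -> C -> D -> F (cost 4).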
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs) -> None:
        """simple docstring"""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        """simple docstring"""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
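        # Rounding down to a multiple of `size_divisor` keeps feature-map shapes
        # valid for encoders that downsample by fixed strides (an assumption about
        # the downstream model, not something this class enforces).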
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """simple docstring"""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, do_rescale=None, size_divisor=None, resample=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 694 | 1 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
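# Note: each iteration halves the bracket [a, b] while keeping a sign change
# inside it, so the loop runs roughly log2((b - a) / 0.01) times before exiting.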
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}'''
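# For example, the parametrization ("org-name/dataset-name", "filename with blanks.csv", "v2")
# is expected to yield
# "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv",
# since quote() percent-encodes the space in the filename.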
| 694 | 1 |
'''simple docstring'''
_lowerCAmelCase : Any = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_lowerCAmelCase : Any = [{"type": "code", "content": INSTALL_CONTENT}]
_lowerCAmelCase : int = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 694 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
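# Example: electric_power(voltage=0, current=2, power=5) solves P = V * I for the
# missing quantity and returns result(name='voltage', value=2.5).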
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
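# Each case embeds exactly one hard dependency (`os`); imports wrapped in
# try/except (`bar`, `baz`) are treated as optional and should not be returned.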
| 694 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_flax_xlm_roberta_base(self):
        """simple docstring"""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "markuplm"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
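        # The xpath_* settings configure the extra XPath tag / subscript embeddings
        # (per-node tree position up to `max_depth` steps) that MarkupLM adds on top
        # of a standard BERT-style encoder.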
| 694 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowerCAmelCase : Any = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowerCAmelCase : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
_lowerCAmelCase : Optional[int] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
    def _compute(self, predictions, references, char_order=CHRF.CHAR_ORDER, word_order=CHRF.WORD_ORDER, beta=CHRF.BETA, lowercase=False, whitespace=False, eps_smoothing=False):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
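    # The transpose above converts the datasets-style layout (one list of references
    # per prediction) into sacrebleu's layout (one stream of i-th references across
    # all predictions), as noted in the module docstring.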
| 694 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
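    # `multi_query=True` selects multi-query attention: all query heads share a
    # single key/value head, which shrinks the KV cache and speeds up decoding.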
| 694 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
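# Example: median_of_two_arrays([1.0, 3.0], [2.0]) sorts to [1.0, 2.0, 3.0] and
# returns 2.0. The concatenate-and-sort approach costs O((m + n) log(m + n)).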
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 694 | 1 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """simple docstring"""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """simple docstring"""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
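# adaptive_forward runs exactly one transformer layer per call, which is what lets
# the model below decide after every layer whether it can exit early.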
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """simple docstring"""

    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        """simple docstring"""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """simple docstring"""
        self.patience = patience

    def reset_stats(self):
        """simple docstring"""
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        """simple docstring"""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
@add_start_docstrings_to_model_forward(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , ) -> List[Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
snake_case__ : Any = input_ids.size()
elif inputs_embeds is not None:
snake_case__ : Tuple = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
snake_case__ : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case__ : Union[str, Any] = torch.ones(lowerCamelCase , device=lowerCamelCase )
if token_type_ids is None:
snake_case__ : List[Any] = torch.zeros(lowerCamelCase , dtype=torch.long , device=lowerCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case__ : torch.Tensor = self.get_extended_attention_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
snake_case__ ,snake_case__ ,snake_case__ : int = encoder_hidden_states.size()
snake_case__ : str = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
snake_case__ : Optional[int] = torch.ones(lowerCamelCase , device=lowerCamelCase )
snake_case__ : Dict = self.invert_attention_mask(lowerCamelCase )
else:
snake_case__ : int = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case__ : Union[str, Any] = self.get_head_mask(lowerCamelCase , self.config.num_hidden_layers )
snake_case__ : List[str] = self.embeddings(
input_ids=lowerCamelCase , position_ids=lowerCamelCase , token_type_ids=lowerCamelCase , inputs_embeds=lowerCamelCase )
snake_case__ : List[Any] = embedding_output
if self.training:
snake_case__ : Dict = []
for i in range(self.config.num_hidden_layers ):
snake_case__ : str = self.encoder.adaptive_forward(
lowerCamelCase , current_layer=lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase )
snake_case__ : Any = self.pooler(lowerCamelCase )
snake_case__ : str = output_layers[i](output_dropout(lowerCamelCase ) )
res.append(lowerCamelCase )
elif self.patience == 0: # Use all layers for inference
snake_case__ : List[str] = self.encoder(
lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
snake_case__ : Any = self.pooler(encoder_outputs[0] )
snake_case__ : Optional[int] = [output_layers[self.config.num_hidden_layers - 1](lowerCamelCase )]
else:
snake_case__ : List[Any] = 0
snake_case__ : Any = None
snake_case__ : List[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
snake_case__ : Dict = self.encoder.adaptive_forward(
lowerCamelCase , current_layer=lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase )
snake_case__ : List[str] = self.pooler(lowerCamelCase )
snake_case__ : Optional[int] = output_layers[i](lowerCamelCase )
if regression:
snake_case__ : List[str] = logits.detach()
if patient_result is not None:
snake_case__ : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
snake_case__ : List[str] = 0
else:
snake_case__ : List[Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
snake_case__ : Tuple = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCamelCase ) ):
patient_counter += 1
else:
snake_case__ : Any = 0
snake_case__ : Any = logits
if patient_counter == self.patience:
break
snake_case__ : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
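# --- Illustrative sketch: a minimal, framework-free rendition of the patience
# rule implemented above (the helper name below is hypothetical). Inference
# stops as soon as `patience` consecutive classifier layers agree.
def _pabee_exit_layer(per_layer_predictions, patience):
    patient_counter = 0
    previous = None
    for layer_idx, prediction in enumerate(per_layer_predictions, start=1):
        if previous is not None and prediction == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            return layer_idx  # early exit at this layer
    return len(per_layer_predictions)  # fell through: all layers were used
# Layers predicting [2, 0, 1, 1, 1, 1] with patience=2 exit at layer 5.
assert _pabee_exit_layer([2, 0, 1, 1, 1, 1], patience=2) == 5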
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __lowerCamelCase , )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
super().__init__(lowerCamelCase )
snake_case__ : Dict = config.num_labels
snake_case__ : Optional[int] = BertModelWithPabee(lowerCamelCase )
snake_case__ : List[Any] = nn.Dropout(config.hidden_dropout_prob )
snake_case__ : List[str] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Dict:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.bert(
input_ids=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , position_ids=lowerCamelCase , head_mask=lowerCamelCase , inputs_embeds=lowerCamelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
snake_case__ : Optional[Any] = (logits[-1],)
if labels is not None:
snake_case__ : Optional[Any] = None
snake_case__ : Optional[int] = 0
for ix, logits_item in enumerate(lowerCamelCase ):
if self.num_labels == 1:
# We are doing regression
snake_case__ : List[Any] = MSELoss()
snake_case__ : Dict = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
snake_case__ : List[Any] = CrossEntropyLoss()
snake_case__ : List[str] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
snake_case__ : int = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
snake_case__ : int = (total_loss / total_weights,) + outputs
return outputs
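# --- Illustrative sketch: the training loss above weights the classifier on
# layer i by (i + 1), i.e. total = sum_i (i + 1) * loss_i / sum_i (i + 1).
# The per-layer losses below are hypothetical numbers, purely for demonstration.
_losses = [0.9, 0.6, 0.3]
_weights = range(1, len(_losses) + 1)
_weighted_avg = sum(w * l for w, l in zip(_weights, _losses)) / sum(_weights)
assert abs(_weighted_avg - (1 * 0.9 + 2 * 0.6 + 3 * 0.3) / 6) < 1e-9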
| 694 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase : Optional[Any] = "RegNetConfig"
# Base docstring
_lowerCAmelCase : Union[str, Any] = "facebook/regnet-y-040"
_lowerCAmelCase : Optional[Any] = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_lowerCAmelCase : str = "facebook/regnet-y-040"
_lowerCAmelCase : List[Any] = "tabby, tabby cat"
_lowerCAmelCase : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase = 3 , lowerCamelCase = 1 , lowerCamelCase = 1 , lowerCamelCase = "relu" , **lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
        # The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
snake_case__ : Dict = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
snake_case__ : List[str] = tf.keras.layers.ConvaD(
filters=lowerCamelCase , kernel_size=lowerCamelCase , strides=lowerCamelCase , padding='''VALID''' , groups=lowerCamelCase , use_bias=lowerCamelCase , name='''convolution''' , )
snake_case__ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
snake_case__ : int = ACTaFN[activation] if activation is not None else tf.identity
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = self.convolution(self.padding(lowerCamelCase ) )
snake_case__ : List[str] = self.normalization(lowerCamelCase )
snake_case__ : Optional[int] = self.activation(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = config.num_channels
snake_case__ : Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Any = shape_list(lowerCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
snake_case__ : List[Any] = tf.transpose(lowerCamelCase , perm=(0, 2, 3, 1) )
snake_case__ : Optional[int] = self.embedder(lowerCamelCase )
return hidden_state
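# --- Illustrative sketch: the NCHW -> NHWC transpose used above, shown on a
# dummy tensor (relies on the module-level `tf` import).
_x = tf.zeros([2, 3, 224, 224])           # (batch, channels, height, width)
_y = tf.transpose(_x, perm=(0, 2, 3, 1))  # (batch, height, width, channels)
assert _y.shape == (2, 224, 224, 3)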
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase = 2 , **lowerCamelCase ) -> Dict:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : List[Any] = tf.keras.layers.ConvaD(
filters=lowerCamelCase , kernel_size=1 , strides=lowerCamelCase , use_bias=lowerCamelCase , name='''convolution''' )
snake_case__ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = False ) -> tf.Tensor:
"""simple docstring"""
return self.normalization(self.convolution(lowerCamelCase ) , training=lowerCamelCase )
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase , name='''pooler''' )
snake_case__ : Union[str, Any] = [
tf.keras.layers.ConvaD(filters=lowerCamelCase , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=lowerCamelCase , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def lowercase__ ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = self.pooler(lowerCamelCase )
for layer_module in self.attention:
snake_case__ : Optional[int] = layer_module(lowerCamelCase )
snake_case__ : Optional[Any] = hidden_state * pooled
return hidden_state
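# --- Illustrative sketch: a minimal NumPy rendition of squeeze-and-excitation
# as used above; plain matrices stand in for the two 1x1 convolutions, and the
# helper name is hypothetical.
import numpy as np
def _se_gate(x, w_reduce, w_expand):
    pooled = x.mean(axis=(0, 1))                  # squeeze: (C,)
    z = np.maximum(pooled @ w_reduce, 0.0)        # bottleneck + ReLU
    gate = 1.0 / (1.0 + np.exp(-(z @ w_expand)))  # sigmoid gate: (C,)
    return x * gate                               # excite: per-channel rescale
assert _se_gate(np.ones((4, 4, 8)), np.ones((8, 2)), np.ones((2, 8))).shape == (4, 4, 8)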
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = in_channels != out_channels or stride != 1
snake_case__ : Dict = max(1 , out_channels // config.groups_width )
snake_case__ : int = (
TFRegNetShortCut(lowerCamelCase , stride=lowerCamelCase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
snake_case__ : List[Any] = [
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=lowerCamelCase , name='''layer.2''' ),
]
snake_case__ : Any = ACTaFN[config.hidden_act]
def lowercase__ ( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = hidden_state
for layer_module in self.layers:
snake_case__ : List[Any] = layer_module(lowerCamelCase )
snake_case__ : int = self.shortcut(lowerCamelCase )
hidden_state += residual
snake_case__ : List[Any] = self.activation(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , **lowerCamelCase ) -> int:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : int = in_channels != out_channels or stride != 1
snake_case__ : Optional[Any] = max(1 , out_channels // config.groups_width )
snake_case__ : Tuple = (
TFRegNetShortCut(lowerCamelCase , stride=lowerCamelCase , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
snake_case__ : List[str] = [
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(lowerCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=lowerCamelCase , name='''layer.3''' ),
]
snake_case__ : Optional[Any] = ACTaFN[config.hidden_act]
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Dict = hidden_state
for layer_module in self.layers:
snake_case__ : List[str] = layer_module(lowerCamelCase )
snake_case__ : int = self.shortcut(lowerCamelCase )
hidden_state += residual
snake_case__ : Union[str, Any] = self.activation(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 2 , lowerCamelCase = 2 , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : str = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
snake_case__ : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase , lowerCamelCase , lowerCamelCase , stride=lowerCamelCase , name='''layers.0''' ),
*[layer(lowerCamelCase , lowerCamelCase , lowerCamelCase , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
for layer_module in self.layers:
snake_case__ : int = layer_module(lowerCamelCase )
return hidden_state
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
snake_case__ : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase , lowerCamelCase , lowerCamelCase , depth=lowerCamelCase , name=f'''stages.{i+1}''' ) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = True ) -> TFBaseModelOutputWithNoAttention:
"""simple docstring"""
snake_case__ : int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
snake_case__ : Union[str, Any] = hidden_states + (hidden_state,)
snake_case__ : int = stage_module(lowerCamelCase )
if output_hidden_states:
snake_case__ : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase , hidden_states=lowerCamelCase )
@keras_serializable
class snake_case ( tf.keras.layers.Layer ):
"""simple docstring"""
_lowerCAmelCase = RegNetConfig
def __init__( self , lowerCamelCase , **lowerCamelCase ) -> Any:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : str = config
snake_case__ : Optional[Any] = TFRegNetEmbeddings(lowerCamelCase , name='''embedder''' )
snake_case__ : int = TFRegNetEncoder(lowerCamelCase , name='''encoder''' )
snake_case__ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase , name='''pooler''' )
@unpack_inputs
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
snake_case__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : Any = self.embedder(lowerCamelCase , training=lowerCamelCase )
snake_case__ : int = self.encoder(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase )
snake_case__ : int = encoder_outputs[0]
snake_case__ : int = self.pooler(lowerCamelCase )
        # Change to NCHW output format to have uniformity in the modules
snake_case__ : str = tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) )
snake_case__ : Any = tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
snake_case__ : Optional[Any] = tuple([tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase , pooler_output=lowerCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = RegNetConfig
_lowerCAmelCase = 'regnet'
_lowerCAmelCase = 'pixel_values'
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCAmelCase : Dict = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase : Any = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , __lowerCamelCase , )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> Any:
"""simple docstring"""
super().__init__(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
snake_case__ : Tuple = TFRegNetMainLayer(lowerCamelCase , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
"""simple docstring"""
snake_case__ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : str = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : int = self.regnet(
pixel_values=lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __lowerCamelCase , )
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
super().__init__(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
snake_case__ : Dict = config.num_labels
snake_case__ : int = TFRegNetMainLayer(lowerCamelCase , name='''regnet''' )
# classification head
snake_case__ : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase__ ( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
"""simple docstring"""
snake_case__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : int = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : Dict = self.regnet(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase )
snake_case__ : Any = outputs.pooler_output if return_dict else outputs[1]
snake_case__ : str = self.classifier[0](lowerCamelCase )
snake_case__ : Any = self.classifier[1](lowerCamelCase )
snake_case__ : Any = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase , logits=lowerCamelCase )
if not return_dict:
snake_case__ : Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states )
| 694 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case__ : List[str] = kwargs.pop('''encoder''' )
snake_case__ : Any = encoder_config.pop('''model_type''' )
snake_case__ : List[str] = kwargs.pop('''decoder''' )
snake_case__ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : str = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case__ : Optional[int] = True
snake_case__ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case__ : List[Any] = self.encoder.to_dict()
snake_case__ : str = self.decoder.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
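# Illustrative usage sketch (the concrete model types below are arbitrary examples):
#     >>> from transformers import BertConfig, GPT2Config, EncoderDecoderConfig
#     >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
#     >>> assert config.decoder.is_decoder and config.decoder.add_cross_attention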
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : int , snake_case__ : int ):
while b:
snake_case__ ,snake_case__ : Any = b, a % b
return a
def _A ( snake_case__ : int , snake_case__ : int ):
return a if b == 0 else euclidean_gcd_recursive(snake_case__ , a % b )
def _A ( ):
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
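    # Illustrative note: for a pair (A, B) the method above produces the layout
    # <s> A </s></s> B </s>, i.e. cls + A + sep + sep + B + sep.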
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 694 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCAmelCase : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _A ( ):
snake_case__ : Optional[int] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
snake_case__ : Dict = get_sagemaker_input()
else:
snake_case__ : List[Any] = get_cluster_input()
return config
def _A ( snake_case__ : Tuple=None ):
if subparsers is not None:
snake_case__ : Tuple = subparsers.add_parser('''config''' , description=snake_case__ )
else:
snake_case__ : Dict = argparse.ArgumentParser('''Accelerate config command''' , description=snake_case__ )
parser.add_argument(
'''--config_file''' , default=snake_case__ , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case__ )
return parser
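# Illustrative invocation (assuming the `accelerate` CLI entry point is installed):
#     accelerate config --config_file ./my_config.yaml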
def _A ( snake_case__ : List[str] ):
snake_case__ : Optional[Any] = get_user_input()
if args.config_file is not None:
snake_case__ : List[Any] = args.config_file
else:
if not os.path.isdir(snake_case__ ):
os.makedirs(snake_case__ )
snake_case__ : Any = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(snake_case__ )
else:
config.to_yaml_file(snake_case__ )
print(f'''accelerate configuration saved at {config_file}''' )
def _A ( ):
snake_case__ : Tuple = config_command_parser()
snake_case__ : int = parser.parse_args()
config_command(snake_case__ )
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowerCAmelCase : Optional[int] = parser.parse_args()
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model"
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase : Optional[Any] = pipe.to(device)
# to channels last
_lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
_lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last)
_lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
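# Illustrative check: `channels_last` only changes the memory layout, not the
# logical shape of a tensor.
_t = torch.randn(1, 3, 8, 8).to(memory_format=torch.channels_last)
assert _t.shape == (1, 3, 8, 8) and _t.is_contiguous(memory_format=torch.channels_last)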
# optimize with ipex
_lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4)
_lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9
_lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8)
_lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCAmelCase : Tuple = 6_6_6
_lowerCAmelCase : str = torch.Generator(device).manual_seed(seed)
_lowerCAmelCase : Dict = {"generator": generator}
if args.steps is not None:
_lowerCAmelCase : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
import socket
def _A ( ):
snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : str = socket.gethostname()
snake_case__ : Union[str, Any] = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
snake_case__ : int = sock.recv(10_24 )
if not data:
break
out_file.write(snake_case__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
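# Illustrative counterpart (hypothetical, not exercised by this script): a
# minimal server sketch that would satisfy the client above; the file name
# "File_to_send" is an assumption.
def _demo_server():
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((socket.gethostname(), 12_312))
    srv.listen(1)
    conn, _addr = srv.accept()
    print(conn.recv(10_24))  # expects b'Hello server!'
    with open('File_to_send', 'rb') as f:
        conn.sendfile(f)
    conn.close()
    srv.close()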
if __name__ == "__main__":
main()
| 694 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowerCAmelCase : List[str] = (7_2_0, 1_2_8_0) # Height, Width
_lowerCAmelCase : List[Any] = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_lowerCAmelCase : List[Any] = 1 / 1_0_0
_lowerCAmelCase : List[Any] = ""
_lowerCAmelCase : List[str] = ""
_lowerCAmelCase : int = ""
_lowerCAmelCase : List[str] = 2_5_0
def _A ( ):
snake_case__ ,snake_case__ : List[str] = get_dataset(snake_case__ , snake_case__ )
for index in range(snake_case__ ):
snake_case__ : List[Any] = random.sample(range(len(snake_case__ ) ) , 4 )
snake_case__ ,snake_case__ ,snake_case__ : Optional[int] = update_image_and_anno(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , filter_scale=snake_case__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
snake_case__ : Tuple = random_chars(32 )
snake_case__ : str = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
snake_case__ : Dict = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
snake_case__ : Union[str, Any] = []
for anno in new_annos:
snake_case__ : Dict = anno[3] - anno[1]
snake_case__ : Union[str, Any] = anno[4] - anno[2]
snake_case__ : Dict = anno[1] + width / 2
snake_case__ : Optional[int] = anno[2] + height / 2
snake_case__ : Dict = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(snake_case__ )
with open(f'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
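# Illustrative check: corner format (xmin, ymin, xmax, ymax) to YOLO format
# (x_center, y_center, width, height), mirroring the conversion used when the
# *.txt annotation files are written above; the helper name is hypothetical.
def _to_yolo(xmin, ymin, xmax, ymax):
    w, h = xmax - xmin, ymax - ymin
    return xmin + w / 2, ymin + h / 2, w, h
_xc, _yc, _w, _h = _to_yolo(0.2, 0.2, 0.6, 0.8)
assert abs(_xc - 0.4) < 1e-9 and abs(_yc - 0.5) < 1e-9 and abs(_w - 0.4) < 1e-9 and abs(_h - 0.6) < 1e-9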
def _A ( snake_case__ : str , snake_case__ : str ):
snake_case__ : List[Any] = []
snake_case__ : Union[str, Any] = []
for label_file in glob.glob(os.path.join(snake_case__ , '''*.txt''' ) ):
snake_case__ : str = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(snake_case__ ) as in_file:
snake_case__ : int = in_file.readlines()
snake_case__ : List[str] = os.path.join(snake_case__ , f'''{label_name}.jpg''' )
snake_case__ : int = []
for obj_list in obj_lists:
snake_case__ : int = obj_list.rstrip('''\n''' ).split(''' ''' )
snake_case__ : Any = float(obj[1] ) - float(obj[3] ) / 2
snake_case__ : List[Any] = float(obj[2] ) - float(obj[4] ) / 2
snake_case__ : Union[str, Any] = float(obj[1] ) + float(obj[3] ) / 2
snake_case__ : Any = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case__ )
labels.append(snake_case__ )
return img_paths, labels
def _A ( snake_case__ : list , snake_case__ : list , snake_case__ : list[int] , snake_case__ : tuple[int, int] , snake_case__ : tuple[float, float] , snake_case__ : float = 0.0 , ):
snake_case__ : Any = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
snake_case__ : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case__ : Any = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case__ : Any = int(scale_x * output_size[1] )
snake_case__ : Tuple = int(scale_y * output_size[0] )
snake_case__ : List[str] = []
snake_case__ : List[str] = []
for i, index in enumerate(snake_case__ ):
snake_case__ : Any = all_img_list[index]
path_list.append(snake_case__ )
snake_case__ : Tuple = all_annos[index]
snake_case__ : Optional[int] = cva.imread(snake_case__ )
if i == 0: # top-left
snake_case__ : Dict = cva.resize(snake_case__ , (divid_point_x, divid_point_y) )
snake_case__ : Any = img
for bbox in img_annos:
snake_case__ : Tuple = bbox[1] * scale_x
snake_case__ : Union[str, Any] = bbox[2] * scale_y
snake_case__ : int = bbox[3] * scale_x
snake_case__ : List[str] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
snake_case__ : Tuple = cva.resize(snake_case__ , (output_size[1] - divid_point_x, divid_point_y) )
snake_case__ : Optional[int] = img
for bbox in img_annos:
snake_case__ : List[Any] = scale_x + bbox[1] * (1 - scale_x)
snake_case__ : Dict = bbox[2] * scale_y
snake_case__ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
snake_case__ : int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
snake_case__ : Tuple = cva.resize(snake_case__ , (divid_point_x, output_size[0] - divid_point_y) )
snake_case__ : Union[str, Any] = img
for bbox in img_annos:
snake_case__ : List[Any] = bbox[1] * scale_x
snake_case__ : Dict = scale_y + bbox[2] * (1 - scale_y)
snake_case__ : int = bbox[3] * scale_x
snake_case__ : Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
snake_case__ : Any = cva.resize(
snake_case__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
snake_case__ : Tuple = img
for bbox in img_annos:
snake_case__ : Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
snake_case__ : str = scale_y + bbox[2] * (1 - scale_y)
snake_case__ : List[str] = scale_x + bbox[3] * (1 - scale_x)
snake_case__ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
snake_case__ : List[str] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _A ( snake_case__ : int ):
    assert number_char > 1, "The number of characters should be greater than 1"
snake_case__ : Dict = ascii_lowercase + digits
return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
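# Illustrative worked example: $1,000 at a 5% nominal APR, compounded daily
# for one year, earns roughly $51.27 in interest.
assert round(1_000 * ((1 + 0.05 / 365) ** 365 - 1), 2) == 51.27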
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase = None ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Dict = value
        snake_case__ : Node | None = None # Added in order to make deleting a node easier
snake_case__ : Node | None = None
snake_case__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase = None ) -> Any:
"""simple docstring"""
snake_case__ : int = root
def __str__( self ) -> str:
"""simple docstring"""
return str(self.root )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
if new_children is not None: # reset its kids
snake_case__ : Tuple = node.parent
if node.parent is not None: # reset its parent
            if self.is_right(lowerCamelCase ): # If it is the right child
snake_case__ : Optional[int] = new_children
else:
snake_case__ : Union[str, Any] = new_children
else:
snake_case__ : Any = new_children
def lowercase__ ( self , lowerCamelCase ) -> bool:
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowercase__ ( self ) -> bool:
"""simple docstring"""
return self.root is None
def lowercase__ ( self , lowerCamelCase ) -> None:
"""simple docstring"""
snake_case__ : List[str] = Node(lowerCamelCase ) # create a new Node
if self.empty(): # if Tree is empty
snake_case__ : Union[str, Any] = new_node # set its root
else: # Tree is not empty
snake_case__ : Tuple = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
snake_case__ : int = new_node # We insert the new node in a leaf
break
else:
snake_case__ : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
snake_case__ : int = new_node
break
else:
snake_case__ : str = parent_node.right
snake_case__ : Dict = parent_node
def lowercase__ ( self , *lowerCamelCase ) -> None:
"""simple docstring"""
for value in values:
self.__insert(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Node | None:
"""simple docstring"""
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please use another.''' )
else:
snake_case__ : Optional[int] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
snake_case__ : List[Any] = node.left if value < node.value else node.right
return node
def lowercase__ ( self , lowerCamelCase = None ) -> Node | None:
"""simple docstring"""
if node is None:
if self.root is None:
return None
snake_case__ : Optional[int] = self.root
if not self.empty():
while node.right is not None:
snake_case__ : Union[str, Any] = node.right
return node
def lowercase__ ( self , lowerCamelCase = None ) -> Node | None:
"""simple docstring"""
if node is None:
snake_case__ : Tuple = self.root
if self.root is None:
return None
if not self.empty():
snake_case__ : List[Any] = self.root
while node.left is not None:
snake_case__ : Optional[Any] = node.left
return node
def lowercase__ ( self , lowerCamelCase ) -> None:
"""simple docstring"""
snake_case__ : Any = self.search(lowerCamelCase ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCamelCase , lowerCamelCase )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCamelCase , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCamelCase , node.left )
else:
snake_case__ : Optional[int] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
snake_case__ : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowercase__ ( self , lowerCamelCase ) -> Iterable:
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowercase__ ( self , lowerCamelCase=None ) -> Any:
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> None:
"""simple docstring"""
if node:
self.inorder(lowerCamelCase , node.left )
arr.append(node.value )
self.inorder(lowerCamelCase , node.right )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : list[int] = []
self.inorder(lowerCamelCase , lowerCamelCase ) # append all values to list using inorder traversal
return arr[k - 1]
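    # Illustrative note: an inorder walk of a binary search tree yields values
    # in ascending order, so the k-th smallest element is simply arr[k - 1].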
def _A ( snake_case__ : Node | None ):
snake_case__ : str = []
if curr_node is not None:
snake_case__ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _A ( ):
snake_case__ : Union[str, Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
snake_case__ : List[str] = BinarySearchTree()
for i in testlist:
t.insert(snake_case__ )
# Prints all the elements of the list in order traversal
print(snake_case__ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(snake_case__ )
print(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 694 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
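# Illustrative note: the candidates generated above are differences of
# consecutive cubes, (n + 1)**3 - n**3 = 3*n*n + 3*n + 1, whose step from n to
# n + 1 is 6 * (n + 1); that is exactly `prime_candidate += 6 * cube_index`.
assert all((n + 1) ** 3 - n ** 3 == 3 * n * n + 3 * n + 1 for n in range(1, 100))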
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : List[str] = dataset
snake_case__ : Dict = process
snake_case__ : str = params
def __len__( self ) -> Dict:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : Dict = self.dataset[i]
snake_case__ : str = self.process(lowerCamelCase , **self.params )
return processed
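# --- Illustrative sketch: a stripped-down stand-in for the wrapper above,
# showing the lazy map-over-items behaviour it implements (names hypothetical).
class _DemoPipelineDataset:
    def __init__(self, dataset, process, params):
        self.dataset, self.process, self.params = dataset, process, params
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, i):
        return self.process(self.dataset[i], **self.params)
assert _DemoPipelineDataset([1, 2, 3], lambda x, scale: scale * x, {"scale": 2})[1] == 4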
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = loader
snake_case__ : Dict = infer
snake_case__ : List[str] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
snake_case__ : Tuple = None
snake_case__ : Optional[Any] = loader_batch_size
# Internal bookkeeping
snake_case__ : Tuple = None
snake_case__ : int = None
def __len__( self ) -> List[str]:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : int = iter(self.loader )
return self
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
snake_case__ : Dict = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
snake_case__ : List[Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
# Convert ModelOutput to tuple first
snake_case__ : Optional[int] = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
snake_case__ : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case__ : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase , lowerCamelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
snake_case__ : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case__ : Any = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
snake_case__ : List[str] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
snake_case__ : int = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
snake_case__ : List[Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
snake_case__ : List[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
snake_case__ : Dict = self._loader_batch_data.__class__(lowerCamelCase )
self._loader_batch_index += 1
return result
def lowercase__ ( self ) -> Any:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
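# Illustrative sketch (not part of the original file): loader_batch_item slices a
# batched dict output back into per-item outputs that still look like batch_size=1:
#
#     batch = {"logits": torch.randn(4, 10)}
#     item_0 = {k: v[0].unsqueeze(0) for k, v in batch.items()}   # shape (1, 10)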
class PipelineChunkIterator( PipelineIterator ):
"""simple docstring"""
    def __init__( self , loader , infer , params , loader_batch_size=None ) -> List[Any]:
        """simple docstring"""
        super().__init__(loader , infer , params )
def __iter__( self ) -> List[str]:
"""simple docstring"""
        self.iterator = iter(self.loader )
        self.subiterator = None
return self
    def __next__( self ) -> Dict:
"""simple docstring"""
if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
            processed = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
return processed
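# Conceptually this flattens a generator of generators; a minimal standalone sketch
# of the same idea (illustrative names only):
#
#     from itertools import chain
#     flat = chain.from_iterable(infer(item) for item in loader)   # yields sub-items one by one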
class PipelinePackIterator( PipelineIterator ):
"""simple docstring"""
def __iter__( self ) -> Any:
"""simple docstring"""
        self.iterator = iter(self.loader )
return self
    def __next__( self ) -> Optional[int]:
"""simple docstring"""
        is_last = False
        accumulator = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('''is_last''' )
                accumulator.append(item )
if is_last:
return accumulator
while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('''is_last''' )
                    accumulator.append(item )
if is_last:
return accumulator
else:
                item = processed
                is_last = item.pop('''is_last''' )
                accumulator.append(item )
return accumulator
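# Illustrative sketch: each processed item carries an `is_last` sentinel, so a stream
#     {"x": 1, "is_last": False}, {"x": 2, "is_last": True}, {"x": 3, "is_last": True}
# is regrouped into the chunks [{"x": 1}, {"x": 2}] and [{"x": 3}].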
class KeyDataset( Dataset ):
"""simple docstring"""
    def __init__( self , dataset , key ) -> List[Any]:
        """simple docstring"""
        self.dataset = dataset
        self.key = key
def __len__( self ) -> Tuple:
"""simple docstring"""
return len(self.dataset )
    def __getitem__( self , i ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class KeyPairDataset( Dataset ):
"""simple docstring"""
    def __init__( self , dataset , keya , keyb ) -> Any:
        """simple docstring"""
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb
def __len__( self ) -> Tuple:
"""simple docstring"""
return len(self.dataset )
    def __getitem__( self , i ) -> Dict:
        """simple docstring"""
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 694 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1( datasets.Metric ):
    """simple docstring"""
    def _info( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ) -> List[Any]:
        """simple docstring"""
        score = f1_score(
            predictions , references , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64] ,
    constant_matrix: NDArray[float64] ,
    init_val: list[int] ,
    iterations: int ,
) -> list[float]:
    rowsa , colsa = coefficient_matrix.shape
    rowsb , colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
        raise ValueError(msg )
    if colsb != 1:
        msg = f'''Constant matrix must be nx1 but received {rowsb}x{colsb}'''
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f'''received {rowsa}x{colsa} and {rowsb}x{colsb}'''
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f'''matrix but received {len(init_val )} and {rowsa}'''
        )
        raise ValueError(msg )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64] ) -> bool:
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
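# A worked example against the function as reconstructed above (values verified by hand):
if __name__ == "__main__":
    demo_coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]], dtype=float)
    demo_constant = np.array([[2], [-6], [-4]], dtype=float)
    # Three Jacobi sweeps from the starting guess [0.5, -0.5, -0.5]
    print(jacobi_iteration_method(demo_coefficient, demo_constant, [0.5, -0.5, -0.5], 3))
    # -> [0.909375, -1.14375, -0.7484375]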
| 694 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput( BaseOutput ):
"""simple docstring"""
    sample: torch.FloatTensor
class UNetaDModel( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , sample_size = 65536 , sample_rate = None , in_channels = 2 , out_channels = 2 , extra_in_channels = 0 , time_embedding_type = "fourier" , flip_sin_to_cos = True , use_timestep_embedding = False , freq_shift = 0.0 , down_block_types = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type = "UNetMidBlock1D" , out_block_type = None , block_out_channels = (32, 32, 64) , act_fn = None , norm_num_groups = 8 , layers_per_block = 1 , downsample_each_block = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
        self.sample_size = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
# down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(down_block_types ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
# mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
# up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
# out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample , timestep , return_dict = True , ) -> Union[UNetaDOutput, Tuple]:
        """simple docstring"""
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample , res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample )
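# Shape-check sketch (illustrative; upstream this class is diffusers' UNet1DModel):
#
#     model = UNetaDModel(sample_size=256, in_channels=2, out_channels=2)
#     sample = torch.randn(1, 2, 256)
#     out = model(sample, torch.tensor([10])).sample   # same (batch, channels, length) shape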
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
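# The _LazyModule above defers the heavy torch-dependent imports until first attribute
# access. A minimal standalone sketch of the same idea via PEP 562 (illustrative only):
#
#     import importlib
#     def __getattr__(name):
#         module = importlib.import_module(".modeling_falcon", __name__)
#         return getattr(module, name)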
| 694 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str , pytorch_dump_folder_path: str ):
snake_case__ : Tuple = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
snake_case__ : List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , None ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = '''openwebtext_ccnews_stories_books_cased'''
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , '''models''' )
    vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(vocab ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        '''vocab_size''': len(vocab ),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-base''' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['''input_ids''']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='''pt''' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print('''✔️ Both models output the same tensors''' )
    else:
        print('''❌ Both models do **NOT** output the same tensors''' )
        print('''Absolute difference is:''' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 694 | 1 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int ,
    frequency: int ,
    initial_speed: int ,
    random_frequency: bool = False ,
    random_speed: bool = False ,
    max_speed: int = 5 ,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list , car_index: int ) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update(highway_now: list , probability: float , max_speed: int ) -> list:
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate(highway: list , number_of_update: int , probability: float , max_speed: int ) -> list:
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
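# A deterministic worked example: two cars five cells apart, no random braking.
if __name__ == "__main__":
    demo = construct_highway(10, frequency=5, initial_speed=2)
    print(demo)   # [[2, -1, -1, -1, -1, 2, -1, -1, -1, -1]]
    print(update(demo[0], probability=0.0, max_speed=5))
    # -> [3, -1, -1, -1, -1, 3, -1, -1, -1, -1]: each car accelerates, capped by the gap ahead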
| 694 |
'''simple docstring'''
def solution(n: int = 4_00_00_00 ) -> int:
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F'''{solution() = }''')
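# Alternative sketch (illustrative): even Fibonacci numbers obey E(k) = 4*E(k-1) + E(k-2)
# with E(1) = 2 and E(2) = 8, which skips the odd terms entirely.
def solution_fast(n: int = 4_00_00_00) -> int:
    a, b = 2, 8
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total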
| 694 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = tempfile.mkdtemp()
# fmt: off
snake_case__ : Dict = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
snake_case__ : Optional[Any] = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
snake_case__ : Optional[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
snake_case__ : List[str] = {'''unk_token''': '''<unk>'''}
snake_case__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase ) )
snake_case__ : List[str] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowercase__ ( self , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowercase__ ( self , **lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
snake_case__ : Union[str, Any] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : str = self.get_tokenizer()
snake_case__ : List[str] = self.get_rust_tokenizer()
snake_case__ : Any = self.get_image_processor()
snake_case__ : Optional[Any] = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
snake_case__ : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
snake_case__ : str = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
snake_case__ : int = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
snake_case__ : Dict = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
snake_case__ : int = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Any = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Union[str, Any] = self.prepare_image_inputs()
snake_case__ : Optional[int] = image_processor(lowerCamelCase , return_tensors='''np''' )
snake_case__ : int = processor(images=lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : List[Any] = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Tuple = '''lower newer'''
snake_case__ : Optional[Any] = processor(text=lowerCamelCase )
snake_case__ : int = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : int = self.get_image_processor()
snake_case__ : int = self.get_tokenizer()
snake_case__ : str = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Union[str, Any] = '''lower newer'''
snake_case__ : Tuple = self.prepare_image_inputs()
snake_case__ : Tuple = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : List[Any] = self.get_tokenizer()
snake_case__ : Union[str, Any] = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Dict = self.prepare_image_inputs()
snake_case__ : Optional[Any] = self.prepare_image_inputs()
snake_case__ : str = processor(images=lowerCamelCase , visual_prompt=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : str = self.get_image_processor()
snake_case__ : List[str] = self.get_tokenizer()
snake_case__ : Tuple = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : List[Any] = processor.batch_decode(lowerCamelCase )
snake_case__ : List[str] = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 5_1_2,
}
class PegasusTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = offset
if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list )}, but is'''
                    f''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ) -> List[int]:
        """simple docstring"""
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
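# Usage sketch (illustrative):
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("Summarize this.")["input_ids"]   # ends with tok.eos_token_id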
| 694 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=False , ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = size if size is not None else {'''height''': 20, '''width''': 20}
snake_case__ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case__ : str = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : str = num_channels
snake_case__ : Union[str, Any] = image_size
snake_case__ : Tuple = min_resolution
snake_case__ : str = max_resolution
snake_case__ : Optional[Any] = do_resize
snake_case__ : List[str] = size
snake_case__ : Optional[Any] = do_center_crop
snake_case__ : Tuple = crop_size
snake_case__ : Any = do_normalize
snake_case__ : int = image_mean
snake_case__ : Union[str, Any] = image_std
snake_case__ : List[Any] = do_reduce_labels
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    image = Image.open(dataset[0]['''file'''] )
    map = Image.open(dataset[1]['''file'''] )
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    imagea = Image.open(ds[0]['''file'''] )
    imageb = Image.open(ds[1]['''file'''] )
    mapa = Image.open(ds[2]['''file'''] )
    mapb = Image.open(ds[3]['''file'''] )
    return [imagea, imageb], [mapa, mapb]
@require_torch
@require_vision
class BeitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = BeitImageProcessor if is_vision_available() else None
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = BeitImageProcessingTester(self )
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase , '''image_std''' ) )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCamelCase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , lowerCamelCase )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case__ : List[str] = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case__ : Dict = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
snake_case__ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case__ : Optional[int] = image_processing(lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
snake_case__ : List[str] = []
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
snake_case__ : Tuple = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case__ ,snake_case__ : List[Any] = prepare_semantic_single_inputs()
snake_case__ : Any = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
snake_case__ ,snake_case__ : Union[str, Any] = prepare_semantic_batch_inputs()
snake_case__ : Dict = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case__ ,snake_case__ : Optional[int] = prepare_semantic_single_inputs()
snake_case__ : Optional[int] = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
image_processing.do_reduce_labels = True
snake_case__ : Tuple = image_processing(lowerCamelCase , lowerCamelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
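# Note (added): do_reduce_labels shifts every class id down by one and maps the
# background class 0 to the ignore index 255, which is why the upper bound in the
# assertions changes from 150 back to 255 once the flag is enabled.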
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
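# Note (added): squareplus(x) = (x + sqrt(x**2 + 4)) / 2 maps the whole real line
# smoothly onto the positive reals (squareplus(0) == 1), which is how unconstrained
# projection outputs become valid positive parameters such as df, scale, or
# total_count. A quick torch-only sanity check:
#
#   x = torch.tensor([-3.0, 0.0, 3.0])
#   y = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
#   assert (y > 0).all() and torch.isclose(y[1], torch.tensor(1.0))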
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # We cannot scale with an affine transformation here, since the negative
    # binomial should return integers; instead we scale the parameters.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
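# Note (added): via the Gamma-Poisson mixture view of the negative binomial,
# multiplying the mean by `scale` is equivalent to keeping total_count fixed and
# shifting the logits by log(scale), which is why `logits += scale.log()` above
# replaces the AffineTransformed wrapper used by the other outputs.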
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Return the middle entry of row 2n of Pascal's triangle, i.e. the central
    binomial coefficient C(2n, n)."""
    number = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = number // 2
    return int(factorial(number) / (factorial(k) * factorial(number - k)))
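def solution_comb(n: int = 20) -> int:
    """Hypothetical cross-check, not part of the original solution: the same value
    is the central binomial coefficient C(2n, n), which math.comb (Python 3.8+)
    computes directly; solution(20) == solution_comb(20) == 137846528820."""
    from math import comb

    return comb(2 * n, n)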
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
_lowerCAmelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every query vector, return the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = f"Wrong input data's dimensions... dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = f"Wrong input data's shape... dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = f"Input data have different datatype... dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
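def similarity_search_vectorized(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """Hypothetical numpy-only variant, not part of the original file: broadcasts
    all pairwise euclidean distances at once instead of the explicit double loop in
    similarity_search, assuming both inputs are already-validated 2-D arrays."""
    # (num_queries, num_points) matrix of pairwise distances
    dists = norm(value_array[:, None, :] - dataset[None, :, :], axis=-1)
    nearest = dists.argmin(axis=1)
    return [[dataset[i].tolist(), float(dists[q, i])] for q, i in enumerate(nearest)]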
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (EulerDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : int = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
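# Note (added): the loop above is the canonical Euler sampling pattern: the sample
# starts as deterministic noise scaled by init_noise_sigma, scale_model_input
# applies the sigma-dependent input scaling at each timestep, the model predicts
# the residual, and scheduler.step takes one Euler integration step. The sum/mean
# assertions pin the whole trajectory numerically against reference values.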
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Optional[int] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Union[str, Any] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Tuple = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : int = output.prev_sample
snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Optional[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
| 694 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : int = do_resize
snake_case__ : Dict = do_rescale
snake_case__ : Any = size_divisor
snake_case__ : str = resample
super().__init__(**lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
snake_case__ : Any = height // size_divisor * size_divisor
snake_case__ : Union[str, Any] = width // size_divisor * size_divisor
snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
return image
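# Worked example (added): with size_divisor=32, a 480x640 image is returned
# unchanged, while 500x650 is floored to 480x640 (500 // 32 * 32 == 480 and
# 650 // 32 * 32 == 640), so both output sides are always multiples of 32.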
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature:
"""simple docstring"""
snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor
snake_case__ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images]
snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
snake_case__ : str = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 694 | 1 |
'''simple docstring'''
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
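# Usage sketch (added), assuming numpy arrays of predictions and labels:
#
#   import numpy as np
#   glue_compute_metrics("mrpc", np.array([1, 0, 1]), np.array([1, 1, 1]))
#   # -> {"acc": 0.666..., "f1": 0.8, "acc_and_f1": 0.733...}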
| 694 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
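# Example of an expected URL (added), for repo_id="org-name/dataset-name",
# path="filename with blanks.csv", revision=None:
#   https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv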
| 694 | 1 |
'''simple docstring'''
import os
import string
import sys
_lowerCAmelCase : Optional[int] = 1 << 8
_lowerCAmelCase : Tuple = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 2_7,
"up": 6_5 + ARROW_KEY_FLAG,
"down": 6_6 + ARROW_KEY_FLAG,
"right": 6_7 + ARROW_KEY_FLAG,
"left": 6_8 + ARROW_KEY_FLAG,
"mod_int": 9_1,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 5_0,
"delete": 5_1,
"pg_up": 5_3,
"pg_down": 5_4,
}
_lowerCAmelCase : List[Any] = KEYMAP["up"]
_lowerCAmelCase : Tuple = KEYMAP["left"]
if sys.platform == "win32":
WIN_CH_BUFFER = []
WIN_KEYMAP = {
B"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
B"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
B"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
B"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
B"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(1_0):
KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
if os.name == "nt":
import msvcrt
snake_case__ : Tuple = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(snake_case__ ) == 0:
# Read the keystroke
snake_case__ : Dict = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
snake_case__ : Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
snake_case__ : List[str] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
WIN_CH_BUFFER.append(snake_case__ )
if ord(snake_case__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26 ) )
snake_case__ : Union[str, Any] = chr(KEYMAP['''esc'''] )
except KeyError:
snake_case__ : Optional[Any] = cha[1]
else:
snake_case__ : Any = ch.decode(snake_case__ )
else:
snake_case__ : List[Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
snake_case__ : Optional[Any] = sys.stdin.fileno()
snake_case__ : Optional[int] = termios.tcgetattr(snake_case__ )
try:
tty.setraw(snake_case__ )
snake_case__ : Union[str, Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(snake_case__ , termios.TCSADRAIN , snake_case__ )
return ch
def get_character():
snake_case__ : str = get_raw_chars()
if ord(snake_case__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(snake_case__ ) == KEYMAP["esc"]:
snake_case__ : List[str] = get_raw_chars()
if ord(snake_case__ ) == KEYMAP["mod_int"]:
snake_case__ : List[str] = get_raw_chars()
if ord(snake_case__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(snake_case__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(snake_case__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 694 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float):
    """Solve P = V * I for whichever of the three quantities is given as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
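# Usage sketch (added): exactly one of the three quantities must be 0, and the
# function solves P = V * I for the missing one, e.g.
#   electric_power(voltage=0, current=2, power=5)  -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)  -> result(name='power', value=4.0)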
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
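# Worked example (added), with number = 0b101 (= 5):
#   set_bit(5, 1)    -> 0b111 == 7
#   clear_bit(5, 0)  -> 0b100 == 4
#   flip_bit(5, 1)   -> 0b111 == 7
#   is_bit_set(5, 2) -> True
#   get_bit(5, 1)    -> 0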
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , snake_case__ )
def _A ( snake_case__ : List[str] , snake_case__ : Dict ):
snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' )
with open(snake_case__ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case__ )
snake_case__ : int = get_imports(snake_case__ )
assert parsed_imports == ["os"]
| 694 | 1 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save the data to disk: via xm.save on TPU, otherwise only from the local main process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys are upper-cased) for the duration of the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Return a human-readable name for a function, class, or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, merging nested dicts instead of overwriting them."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
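# Worked example (added): nested keys are merged rather than overwritten, so
#   merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}})  ->  {"a": {"y": 2, "x": 1}}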
def is_port_in_use(port: int = None) -> bool:
    """Check whether something is already listening on `port` (defaults to 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'markuplm'
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=256 , lowerCamelCase=1024 , lowerCamelCase=216 , lowerCamelCase=1001 , lowerCamelCase=32 , lowerCamelCase=50 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : List[str] = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = position_embedding_type
snake_case__ : Dict = use_cache
snake_case__ : int = classifier_dropout
# additional properties
snake_case__ : Union[str, Any] = max_depth
snake_case__ : Dict = max_xpath_tag_unit_embeddings
snake_case__ : Any = max_xpath_subs_unit_embeddings
snake_case__ : int = tag_pad_id
snake_case__ : Tuple = subs_pad_id
snake_case__ : Dict = xpath_unit_hidden_size
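# Usage sketch (added): assuming the conventional class name MarkupLMConfig for the
# config class above (the corpus shows it under a placeholder name), a smaller
# model is just keyword overrides:
#   config = MarkupLMConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)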
| 694 | 1 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    """Check primality by trial division over 6k +/- 1 candidates."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
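# Worked note (added): is_prime only trial-divides by 6k +/- 1 candidates up to
# sqrt(number), since every prime greater than 3 has that form; solution() then
# walks odd numbers until the 10001st prime, which is 104743.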
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a root can only be bracketed if the function changes sign
    # between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
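def bisection_for(f, a: float, b: float, tol: float = 0.01) -> float:
    """Hypothetical generalization, not part of the original file: the same
    Bolzano-based halving loop for any continuous f with a sign change on [a, b].
    Each pass halves the bracket, so roughly log2((b - a) / tol) iterations are
    needed."""
    if f(a) * f(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= tol:
        c = (a + b) / 2
        if f(c) == 0.0:
            break
        if f(c) * f(a) < 0:
            b = c
        else:
            a = c
    return c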
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 694 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'instructblip_vision_model'
def __init__( self , lowerCamelCase=1408 , lowerCamelCase=6144 , lowerCamelCase=39 , lowerCamelCase=16 , lowerCamelCase=224 , lowerCamelCase=14 , lowerCamelCase="gelu" , lowerCamelCase=1E-6 , lowerCamelCase=0.0 , lowerCamelCase=1E-1_0 , lowerCamelCase=True , **lowerCamelCase , ) -> str:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Optional[Any] = hidden_size
snake_case__ : str = intermediate_size
snake_case__ : Any = num_hidden_layers
snake_case__ : Tuple = num_attention_heads
snake_case__ : Optional[Any] = patch_size
snake_case__ : Tuple = image_size
snake_case__ : List[Any] = initializer_range
snake_case__ : Union[str, Any] = attention_dropout
snake_case__ : List[str] = layer_norm_eps
snake_case__ : List[Any] = hidden_act
snake_case__ : Optional[Any] = qkv_bias
@classmethod
def lowercase__ ( cls , lowerCamelCase , **lowerCamelCase ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCamelCase )
snake_case__ ,snake_case__ : Any = cls.get_config_dict(lowerCamelCase , **lowerCamelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
snake_case__ : List[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase , **lowerCamelCase )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'instructblip_qformer'
def __init__( self , lowerCamelCase=30522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=0.02 , lowerCamelCase=1E-1_2 , lowerCamelCase=0 , lowerCamelCase="absolute" , lowerCamelCase=2 , lowerCamelCase=1408 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
snake_case__ : int = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : Dict = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : Optional[int] = position_embedding_type
snake_case__ : List[Any] = cross_attention_frequency
snake_case__ : Optional[Any] = encoder_hidden_size
@classmethod
def lowercase__ ( cls , lowerCamelCase , **lowerCamelCase ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCamelCase )
snake_case__ ,snake_case__ : List[str] = cls.get_config_dict(lowerCamelCase , **lowerCamelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
snake_case__ : Optional[int] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCamelCase , **lowerCamelCase )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'instructblip'
_lowerCAmelCase = True
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=32 , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
if vision_config is None:
snake_case__ : Tuple = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
snake_case__ : Optional[Any] = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
snake_case__ : List[Any] = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
snake_case__ : Optional[Any] = InstructBlipVisionConfig(**lowerCamelCase )
snake_case__ : Any = InstructBlipQFormerConfig(**lowerCamelCase )
snake_case__ : Union[str, Any] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
snake_case__ : Union[str, Any] = CONFIG_MAPPING[text_model_type](**lowerCamelCase )
snake_case__ : Any = self.text_config.tie_word_embeddings
snake_case__ : Dict = self.text_config.is_encoder_decoder
snake_case__ : str = num_query_tokens
snake_case__ : Tuple = self.vision_config.hidden_size
snake_case__ : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : Dict = 1.0
snake_case__ : Union[str, Any] = 0.02
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ) -> Any:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCamelCase , )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = copy.deepcopy(self.__dict__ )
snake_case__ : Any = self.vision_config.to_dict()
snake_case__ : Union[str, Any] = self.qformer_config.to_dict()
snake_case__ : Dict = self.text_config.to_dict()
snake_case__ : Dict = self.__class__.model_type
return output
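# Usage sketch (added): assuming the conventional class names InstructBlipConfig /
# InstructBlipVisionConfig / InstructBlipQFormerConfig for the classes above, and
# transformers' OPTConfig for the default text backbone:
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
#   )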
| 694 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
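# Worked example (added): median_of_two_arrays([1.0, 3.0], [2.0]) sorts the union
# to [1.0, 2.0, 3.0] (odd length) and returns the middle value 2.0; an even
# combined length such as [1.0, 2.0] + [3.0, 4.0] averages the two middle values
# to 2.5.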
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_lowerCAmelCase : List[str] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 694 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
_lowerCAmelCase : Dict = random.Random()
def _A ( snake_case__ : Union[str, Any] , snake_case__ : List[Any]=1.0 , snake_case__ : Union[str, Any]=None , snake_case__ : Union[str, Any]=None ):
if rng is None:
snake_case__ : str = global_rng
snake_case__ : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=400 , lowerCamelCase=2000 , lowerCamelCase=24 , lowerCamelCase=24 , lowerCamelCase=0.0 , lowerCamelCase=16000 , lowerCamelCase=True , lowerCamelCase=True , ) -> Tuple:
"""simple docstring"""
snake_case__ : List[str] = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : str = min_seq_length
snake_case__ : List[str] = max_seq_length
snake_case__ : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case__ : Any = feature_size
snake_case__ : Union[str, Any] = num_mel_bins
snake_case__ : List[str] = padding_value
snake_case__ : List[Any] = sampling_rate
snake_case__ : Optional[int] = return_attention_mask
snake_case__ : Tuple = do_normalize
def lowercase__ ( self ) -> Any:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase__ ( self , lowerCamelCase=False , lowerCamelCase=False ) -> List[Any]:
"""simple docstring"""
def _flatten(lowerCamelCase ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
snake_case__ : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case__ : str = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case__ : Optional[int] = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[int] = SpeechaTextFeatureExtractionTester(self )
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowerCamelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase , axis=0 ) - 1 ) < 1E-3 ) )
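# Note (added): this checker encodes the utterance-level normalization contract:
# with do_normalize=True, each feature column of the (time, feature) matrix should
# have approximately zero mean and unit variance over the unpadded frames.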
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : int = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case__ : Optional[int] = feature_extractor(lowerCamelCase , padding=lowerCamelCase , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
snake_case__ : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
snake_case__ : Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test batched
snake_case__ : str = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features
snake_case__ : str = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case__ : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
snake_case__ : Tuple = np.asarray(lowerCamelCase )
snake_case__ : Union[str, Any] = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features
snake_case__ : List[str] = feature_extractor(lowerCamelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase , lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Union[str, Any] = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case__ : Dict = [None, 16, None]
for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
snake_case__ : Tuple = feature_extractor(
lowerCamelCase , padding=lowerCamelCase , max_length=lowerCamelCase , return_attention_mask=lowerCamelCase )
snake_case__ : Any = inputs.input_features
snake_case__ : int = inputs.attention_mask
snake_case__ : Optional[int] = [np.sum(lowerCamelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : str = ['''longest''', '''max_length''', '''do_not_pad''']
snake_case__ : int = [None, 16, None]
for max_length, padding in zip(lowerCamelCase , lowerCamelCase ):
snake_case__ : List[str] = feature_extractor(
lowerCamelCase , max_length=lowerCamelCase , padding=lowerCamelCase , return_tensors='''np''' , return_attention_mask=lowerCamelCase )
snake_case__ : List[Any] = inputs.input_features
snake_case__ : List[str] = inputs.attention_mask
snake_case__ : Dict = [np.sum(lowerCamelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Optional[int] = feature_extractor(
lowerCamelCase , padding='''max_length''' , max_length=4 , truncation=lowerCamelCase , return_tensors='''np''' , return_attention_mask=lowerCamelCase , )
snake_case__ : Union[str, Any] = inputs.input_features
snake_case__ : Optional[int] = inputs.attention_mask
snake_case__ : Any = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Optional[Any] = feature_extractor(
lowerCamelCase , padding='''longest''' , max_length=4 , truncation=lowerCamelCase , return_tensors='''np''' , return_attention_mask=lowerCamelCase , )
snake_case__ : int = inputs.input_features
snake_case__ : Dict = inputs.attention_mask
snake_case__ : str = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Tuple = feature_extractor(
lowerCamelCase , padding='''longest''' , max_length=16 , truncation=lowerCamelCase , return_tensors='''np''' , return_attention_mask=lowerCamelCase , )
snake_case__ : Optional[int] = inputs.input_features
snake_case__ : List[Any] = inputs.attention_mask
snake_case__ : int = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
import torch
snake_case__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Union[str, Any] = np.random.rand(100 , 32 ).astype(np.floataa )
snake_case__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case__ : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
snake_case__ : Dict = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase__ ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
from datasets import load_dataset
snake_case__ : Union[str, Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
snake_case__ : Optional[int] = ds.sort('''id''' ).select(range(lowerCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Optional[int] = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
snake_case__ : Dict = self._load_datasamples(1 )
snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : str = feature_extractor(lowerCamelCase , return_tensors='''pt''' ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCamelCase , atol=1E-4 ) )
| 694 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Any = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
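# Hedged usage sketch: with the lazy module registered, the public names resolve on first
# access exactly as if they had been imported eagerly (real transformers API):
#
#   from transformers import IBertConfig, IBertModel
#   model = IBertModel(IBertConfig())  # quantization-aware I-BERT with default config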
| 694 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
super().__init__()
snake_case__ : str = nn.ModuleList(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = True , ) -> Union[ControlNetOutput, Tuple]:
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase , lowerCamelCase , self.nets ) ):
snake_case__ ,snake_case__ : Optional[int] = controlnet(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
# merge samples
if i == 0:
snake_case__ ,snake_case__ : Tuple = down_samples, mid_sample
else:
snake_case__ : Optional[Any] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase , lowerCamelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = None , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = 0
snake_case__ : Optional[Any] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase , is_main_process=lowerCamelCase , save_function=lowerCamelCase , safe_serialization=lowerCamelCase , variant=lowerCamelCase , )
idx += 1
snake_case__ : Optional[Any] = model_path_to_save + f'''_{idx}'''
@classmethod
def lowercase__ ( cls , lowerCamelCase , **lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Optional[Any] = 0
snake_case__ : int = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
snake_case__ : Optional[Any] = pretrained_model_path
while os.path.isdir(lowerCamelCase ):
snake_case__ : List[Any] = ControlNetModel.from_pretrained(lowerCamelCase , **lowerCamelCase )
controlnets.append(lowerCamelCase )
idx += 1
snake_case__ : Dict = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowerCamelCase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowerCamelCase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowerCamelCase )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowerCamelCase )
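# Hedged usage sketch (assuming this class mirrors diffusers' MultiControlNetModel): wrap
# several pretrained ControlNets so their residuals are summed during denoising, as in the
# forward pass above. Checkpoint ids are illustrative.
#
#   from diffusers import ControlNetModel
#   canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([canny, pose])  # class name assumed from the diffusers original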
| 694 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case__ : List[str] = kwargs.pop('''encoder''' )
snake_case__ : Any = encoder_config.pop('''model_type''' )
snake_case__ : List[str] = kwargs.pop('''decoder''' )
snake_case__ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : str = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case__ : Optional[int] = True
snake_case__ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case__ : List[Any] = self.encoder.to_dict()
snake_case__ : str = self.decoder.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
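# Hedged usage sketch (assuming this is transformers' EncoderDecoderConfig): build a
# composite config from two standalone configs; the decoder is flipped into cross-attention
# mode by the classmethod above.
#
#   from transformers import BertConfig, EncoderDecoderConfig
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention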
| 694 | 1 |
'''simple docstring'''
def _A ( snake_case__ : str , snake_case__ : str ):
assert x is not None
assert y is not None
snake_case__ : Union[str, Any] = len(snake_case__ )
snake_case__ : List[str] = len(snake_case__ )
# declaring the array for storing the dp values
snake_case__ : Union[str, Any] = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
snake_case__ : Any = 1 if x[i - 1] == y[j - 1] else 0
snake_case__ : Optional[int] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
snake_case__ : str = ''''''
snake_case__ ,snake_case__ : List[Any] = m, n
while i > 0 and j > 0:
snake_case__ : List[Any] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
snake_case__ : str = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = "AGGTAB"
_lowerCAmelCase : Optional[Any] = "GXTXAYB"
_lowerCAmelCase : Any = 4
_lowerCAmelCase : str = "GTAB"
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
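# A space-optimized sketch (assumption: length only, no traceback) of the same DP: keeping
# two rows of the table reduces memory from O(m*n) to O(n).
#
# def lcs_length(x: str, y: str) -> int:
#     prev = [0] * (len(y) + 1)
#     for i in range(1, len(x) + 1):
#         curr = [0] * (len(y) + 1)
#         for j in range(1, len(y) + 1):
#             curr[j] = prev[j - 1] + 1 if x[i - 1] == y[j - 1] else max(prev[j], curr[j - 1])
#         prev = curr
#     return prev[len(y)]  # lcs_length("AGGTAB", "GXTXAYB") == 4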
| 694 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
_lowerCAmelCase : Dict = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
_lowerCAmelCase : str = {"vinai/bartpho-syllable": 1_0_2_4}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : int = self.__dict__.copy()
snake_case__ : Any = None
snake_case__ : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ : Dict = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : int = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = ''''''.join(lowerCamelCase ).replace(lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , '''wb''' ) as fi:
snake_case__ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
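# Hedged usage sketch (the checkpoint id comes from the pretrained maps above; the example
# sentence is illustrative):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tok("Chúng tôi là những nghiên cứu viên.")["input_ids"]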
| 694 | 1 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowerCAmelCase : Union[str, Any] = "\nimport os\n"
_lowerCAmelCase : Optional[int] = "\ndef foo():\n import os\n return False\n"
_lowerCAmelCase : Union[str, Any] = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowerCAmelCase : str = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : str = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowerCAmelCase : Tuple = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowerCAmelCase : List[str] = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowerCAmelCase : Optional[int] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowerCAmelCase : List[Any] = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowerCAmelCase : Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , snake_case__ )
def _A ( snake_case__ : List[str] , snake_case__ : Dict ):
snake_case__ : str = os.path.join(snake_case__ , '''test_file.py''' )
with open(snake_case__ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case__ )
snake_case__ : int = get_imports(snake_case__ )
assert parsed_imports == ["os"]
| 694 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowerCAmelCase : str = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowerCAmelCase : Optional[int] = parser.parse_args()
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : List[str] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowerCAmelCase : Union[str, Any] = "path-to-your-trained-model"
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase : Optional[Any] = pipe.to(device)
# to channels last
_lowerCAmelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
_lowerCAmelCase : str = pipe.vae.to(memory_format=torch.channels_last)
_lowerCAmelCase : List[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowerCAmelCase : Optional[int] = torch.randn(2, 4, 6_4, 6_4)
_lowerCAmelCase : List[str] = torch.rand(1) * 9_9_9
_lowerCAmelCase : Optional[int] = torch.randn(2, 7_7, 7_6_8)
_lowerCAmelCase : List[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowerCAmelCase : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowerCAmelCase : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowerCAmelCase : Tuple = 6_6_6
_lowerCAmelCase : str = torch.Generator(device).manual_seed(seed)
_lowerCAmelCase : Dict = {"generator": generator}
if args.steps is not None:
_lowerCAmelCase : Tuple = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowerCAmelCase : Any = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 694 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_lowerCAmelCase : Any = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : Union[str, Any] = [label.strip() for label in labels.split(''',''' ) if label.strip()]
return labels
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
if len(lowerCamelCase ) == 0 or len(lowerCamelCase ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(lowerCamelCase ) )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ : Union[str, Any] = [sequences]
snake_case__ : Union[str, Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__lowerCamelCase )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase=ZeroShotClassificationArgumentHandler() , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[int] = args_parser
super().__init__(*lowerCamelCase , **lowerCamelCase )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=TruncationStrategy.ONLY_FIRST , **lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
snake_case__ : List[Any] = self.tokenizer.eos_token
try:
snake_case__ : List[Any] = self.tokenizer(
lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , )
except Exception as e:
if "too short" in str(lowerCamelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
snake_case__ : Optional[Any] = self.tokenizer(
lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors=lowerCamelCase , padding=lowerCamelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowercase__ ( self , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
if kwargs.get('''multi_class''' , lowerCamelCase ) is not None:
snake_case__ : Tuple = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
snake_case__ : Union[str, Any] = {}
if "candidate_labels" in kwargs:
snake_case__ : List[Any] = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
snake_case__ : Dict = kwargs['''hypothesis_template''']
snake_case__ : Tuple = {}
if "multi_label" in kwargs:
snake_case__ : str = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase , ) -> Optional[Any]:
"""simple docstring"""
if len(lowerCamelCase ) == 0:
pass
elif len(lowerCamelCase ) == 1 and "candidate_labels" not in kwargs:
snake_case__ : str = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase="This example is {}." ) -> Any:
"""simple docstring"""
snake_case__ ,snake_case__ : Dict = self._args_parser(lowerCamelCase , lowerCamelCase , lowerCamelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase , lowerCamelCase ) ):
snake_case__ : List[str] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase ) - 1,
**model_input,
}
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : int = inputs['''candidate_label''']
snake_case__ : List[Any] = inputs['''sequence''']
snake_case__ : Optional[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
snake_case__ : int = self.model(**lowerCamelCase )
snake_case__ : Union[str, Any] = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=False ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Any = [outputs['''candidate_label'''] for outputs in model_outputs]
snake_case__ : List[str] = [outputs['''sequence'''] for outputs in model_outputs]
snake_case__ : Any = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
snake_case__ : List[Any] = logits.shape[0]
snake_case__ : str = len(lowerCamelCase )
snake_case__ : Tuple = N // n
snake_case__ : List[Any] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCamelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
snake_case__ : List[str] = self.entailment_id
snake_case__ : Tuple = -1 if entailment_id == 0 else 0
snake_case__ : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
snake_case__ : List[str] = np.exp(lowerCamelCase ) / np.exp(lowerCamelCase ).sum(-1 , keepdims=lowerCamelCase )
snake_case__ : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
snake_case__ : Dict = reshaped_outputs[..., self.entailment_id]
snake_case__ : List[str] = np.exp(lowerCamelCase ) / np.exp(lowerCamelCase ).sum(-1 , keepdims=lowerCamelCase )
snake_case__ : str = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
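# Hedged usage sketch (real transformers pipeline API): each candidate label is cast into
# the hypothesis template ("This example is {}." by default, per the preprocess step above)
# and scored via NLI entailment.
#
#   from transformers import pipeline
#   clf = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   clf("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])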
| 694 |
'''simple docstring'''
import socket
def _A ( ):
snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : str = socket.gethostname()
snake_case__ : Union[str, Any] = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
snake_case__ : int = sock.recv(10_24 )
if not data:
break
out_file.write(snake_case__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
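# A matching server-side sketch (an assumption, not part of this file) that serves a file
# to the client above; it must listen on the same host/port before the client connects.
#
# import socket
#
# def serve_file(path: str = "File_to_send", port: int = 12312) -> None:
#     srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     srv.bind((socket.gethostname(), port))
#     srv.listen(1)
#     conn, _addr = srv.accept()
#     print(conn.recv(1024))  # consume the client's greeting
#     with open(path, "rb") as in_file:
#         while chunk := in_file.read(1024):
#             conn.send(chunk)
#     conn.close()
#     srv.close()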
| 694 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def _A ( snake_case__ : List[Any] ):
# initialize config
if "resnet-50" in model_name:
snake_case__ : Optional[int] = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
snake_case__ : Tuple = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
raise ValueError('''Model name should include either resnet50 or resnet101''' )
snake_case__ : int = DetrConfig(use_timm_backbone=snake_case__ , backbone_config=snake_case__ )
# set label attributes
snake_case__ : List[Any] = '''panoptic''' in model_name
if is_panoptic:
snake_case__ : Tuple = 2_50
else:
snake_case__ : Tuple = 91
snake_case__ : List[str] = '''huggingface/label-files'''
snake_case__ : List[Any] = '''coco-detection-id2label.json'''
snake_case__ : Union[str, Any] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ : Tuple = {int(snake_case__ ): v for k, v in idalabel.items()}
snake_case__ : Dict = idalabel
snake_case__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def _A ( snake_case__ : int ):
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case__ : Any = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def _A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] ):
snake_case__ : int = state_dict.pop(snake_case__ )
snake_case__ : Optional[int] = val
def _A ( snake_case__ : Dict , snake_case__ : str=False ):
snake_case__ : str = ''''''
if is_panoptic:
snake_case__ : int = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ : str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case__ : List[str] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Any = in_proj_weight[:2_56, :]
snake_case__ : List[str] = in_proj_bias[:2_56]
snake_case__ : Union[str, Any] = in_proj_weight[2_56:5_12, :]
snake_case__ : Union[str, Any] = in_proj_bias[2_56:5_12]
snake_case__ : Optional[int] = in_proj_weight[-2_56:, :]
snake_case__ : Optional[Any] = in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
snake_case__ : Dict = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
snake_case__ : str = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[:2_56, :]
snake_case__ : str = in_proj_bias[:2_56]
snake_case__ : Union[str, Any] = in_proj_weight[2_56:5_12, :]
snake_case__ : Union[str, Any] = in_proj_bias[2_56:5_12]
snake_case__ : List[str] = in_proj_weight[-2_56:, :]
snake_case__ : int = in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
snake_case__ : Dict = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
snake_case__ : Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
snake_case__ : Tuple = in_proj_weight_cross_attn[:2_56, :]
snake_case__ : Any = in_proj_bias_cross_attn[:2_56]
snake_case__ : List[str] = in_proj_weight_cross_attn[2_56:5_12, :]
snake_case__ : List[str] = in_proj_bias_cross_attn[2_56:5_12]
snake_case__ : Optional[Any] = in_proj_weight_cross_attn[-2_56:, :]
snake_case__ : Tuple = in_proj_bias_cross_attn[-2_56:]
def _A ( ):
snake_case__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ : Optional[int] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _A ( snake_case__ : str , snake_case__ : str=None , snake_case__ : int=False ):
snake_case__ ,snake_case__ : List[Any] = get_detr_config(snake_case__ )
# load original model from torch hub
snake_case__ : List[Any] = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f'''Converting model {model_name}...''' )
snake_case__ : Tuple = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=snake_case__ ).eval()
snake_case__ : Optional[int] = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(snake_case__ ):
if is_panoptic:
snake_case__ : Dict = '''detr.''' + src
rename_key(snake_case__ , snake_case__ , snake_case__ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case__ , is_panoptic=snake_case__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ : str = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
snake_case__ : str = state_dict.pop(snake_case__ )
snake_case__ : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case__ : List[Any] = state_dict.pop(snake_case__ )
snake_case__ : str = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
snake_case__ : Any = state_dict.pop(snake_case__ )
snake_case__ : List[Any] = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
snake_case__ : Union[str, Any] = state_dict.pop(snake_case__ )
snake_case__ : Dict = val
# finally, create HuggingFace model and load state dict
snake_case__ : Optional[Any] = DetrForSegmentation(snake_case__ ) if is_panoptic else DetrForObjectDetection(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# verify our conversion on an image
snake_case__ : str = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
snake_case__ : str = DetrImageProcessor(format=snake_case__ )
snake_case__ : Union[str, Any] = processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ : Optional[int] = encoding['''pixel_values''']
snake_case__ : Tuple = detr(snake_case__ )
snake_case__ : List[str] = model(snake_case__ )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f'''nielsr/{model_name}''' )
processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
_lowerCAmelCase : Any = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
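# Hedged invocation sketch using only the flags defined above (script filename assumed):
#
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub
#
# The converted weights are verified against the torch hub model via the allclose checks
# in the conversion function before anything is saved or uploaded.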
| 694 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
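# Hedged worked example for the helpers above (function names are obfuscated in this dump,
# so the expected values are shown as arithmetic rather than as calls):
#
#   simple interest on principal 500 at 0.01/day over 104 days:
#     500 * 0.01 * 104 = 520.0
#   compound interest on principal 10000 at 5% per period over 5 periods:
#     10000 * ((1 + 0.05) ** 5 - 1) ≈ 2762.82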
| 694 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
_lowerCAmelCase : List[str] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_lowerCAmelCase : Union[str, Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _A ( snake_case__ : str ):
with open(snake_case__ , '''rb''' ) as f:
snake_case__ : List[Any] = Image.open(snake_case__ )
return im.convert('''RGB''' )
@dataclass
class snake_case :
"""simple docstring"""
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'A folder containing the training data.'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'A folder containing the validation data.'} )
_lowerCAmelCase = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowercase__ ( self ) -> int:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class snake_case :
"""simple docstring"""
_lowerCAmelCase = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__lowerCamelCase )} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_lowerCAmelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def _A ( snake_case__ : str ):
snake_case__ : Tuple = torch.stack([example['''pixel_values'''] for example in examples] )
snake_case__ : Optional[Any] = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _A ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ ,snake_case__ ,snake_case__ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ ,snake_case__ ,snake_case__ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case__ : Any = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case__ : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
snake_case__ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case__ : List[str] = {}
if data_args.train_dir is not None:
snake_case__ : List[str] = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
snake_case__ : Dict = os.path.join(data_args.validation_dir , '''**''' )
snake_case__ : Tuple = load_dataset(
'''imagefolder''' , data_files=snake_case__ , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case__ : List[str] = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
snake_case__ : Optional[Any] = dataset['''train'''].train_test_split(data_args.train_val_split )
snake_case__ : Dict = split['''train''']
snake_case__ : Optional[Any] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case__ : int = dataset['''train'''].features['''labels'''].names
snake_case__ ,snake_case__ : Tuple = {}, {}
for i, label in enumerate(snake_case__ ):
snake_case__ : Union[str, Any] = str(snake_case__ )
snake_case__ : str = label
# Load the accuracy metric from the datasets package
snake_case__ : Optional[int] = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(snake_case__ : List[str] ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
snake_case__ : List[str] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case__ ) , labelaid=snake_case__ , idalabel=snake_case__ , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case__ : Any = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
snake_case__ : str = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case__ : Optional[Any] = image_processor.size['''shortest_edge''']
else:
snake_case__ : Optional[int] = (image_processor.size['''height'''], image_processor.size['''width'''])
snake_case__ : Optional[int] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
snake_case__ : Any = Compose(
[
RandomResizedCrop(snake_case__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case__ : Optional[Any] = Compose(
[
Resize(snake_case__ ),
CenterCrop(snake_case__ ),
ToTensor(),
normalize,
] )
def train_transforms(snake_case__ : Tuple ):
snake_case__ : Optional[Any] = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(snake_case__ : Any ):
snake_case__ : Any = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
snake_case__ : Optional[int] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(snake_case__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
snake_case__ : Optional[Any] = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(snake_case__ )
# Initialize our trainer
snake_case__ : Optional[Any] = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=snake_case__ , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
snake_case__ : List[str] = None
if training_args.resume_from_checkpoint is not None:
snake_case__ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case__ : int = last_checkpoint
snake_case__ : Optional[Any] = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case__ : Optional[int] = trainer.evaluate()
trainer.log_metrics('''eval''' , snake_case__ )
trainer.save_metrics('''eval''' , snake_case__ )
# Write model card and (optionally) push to hub
snake_case__ : Any = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
if __name__ == "__main__":
main()
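# Hedged launch sketch (standard HfArgumentParser/TrainingArguments flags; dataset and
# output names are placeholders):
#
#   python run_image_classification.py \
#       --dataset_name beans --output_dir ./vit-beans \
#       --do_train --do_eval --remove_unused_columns False
#
# `--remove_unused_columns False` keeps the raw `image` column so the torchvision
# transforms applied via `set_transform` above can read it.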
| 694 |
'''simple docstring'''
from math import isqrt
def _A ( snake_case__ : int ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def _A ( snake_case__ : int = 10**6 ):
snake_case__ : str = 0
snake_case__ : List[str] = 1
snake_case__ : str = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 1 |
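The renaming above leaves the call sites (`is_prime`, `max_prime`) out of sync with the assignments; a self-contained, runnable sketch of the same algorithm (counting primes of the form (n+1)**3 - n**3 below a limit) is:

from math import isqrt

def is_prime(number: int) -> bool:
    return number > 1 and all(number % d != 0 for d in range(2, isqrt(number) + 1))

def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # next difference of consecutive cubes
    return primes_count

if __name__ == "__main__":
    print(f"{solution() = }")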
'''simple docstring'''
def _A ( snake_case__ : list[int] , snake_case__ : int ):
snake_case__ : Tuple = len(snake_case__ )
snake_case__ : Optional[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero (0) can be formed by not taking any element,
    # hence True
for i in range(arr_len + 1 ):
snake_case__ : List[Any] = True
    # a non-zero sum cannot be formed from the empty set, hence False
for i in range(1 , required_sum + 1 ):
snake_case__ : Union[str, Any] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
snake_case__ : Optional[int] = subset[i - 1][j]
if arr[i - 1] <= j:
snake_case__ : Tuple = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 |
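A compact usage sketch of the same subset-sum DP, with illustrative inputs:

def is_subset_sum(arr, required_sum):
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    for i in range(arr_len + 1):
        subset[i][0] = True  # the empty subset always sums to 0
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:  # or take it
                subset[i][j] = subset[i][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]

assert is_subset_sum([3, 34, 4, 12, 5, 2], 9)       # 4 + 5
assert not is_subset_sum([3, 34, 4, 12, 5, 2], 30)  # no subset reaches 30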
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = fa_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 694 | 1 |
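The metric class above is a thin wrapper over scikit-learn; the equivalent direct calls, reproducing the docstring's multiclass example:

from sklearn.metrics import f1_score

predictions = [0, 2, 1, 0, 0, 1]
references = [0, 1, 2, 0, 1, 2]
print(round(f1_score(references, predictions, average="macro"), 2))  # 0.27
print(round(f1_score(references, predictions, average="micro"), 2))  # 0.33
print(f1_score(references, predictions, average=None))  # per-class: [0.8, 0.0, 0.0]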
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=64 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ) -> List[str]:
"""simple docstring"""
snake_case__ : Union[str, Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Optional[Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_input_mask
snake_case__ : List[str] = use_token_type_ids
snake_case__ : List[str] = use_labels
snake_case__ : Optional[Any] = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : Union[str, Any] = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : List[str] = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : int = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_size
snake_case__ : Dict = type_sequence_label_size
snake_case__ : Tuple = initializer_range
snake_case__ : Any = num_labels
snake_case__ : Union[str, Any] = num_choices
snake_case__ : Optional[int] = scope
snake_case__ : List[str] = vocab_size - 1
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Tuple = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : int = None
if self.use_labels:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase__ ( self ) -> Any:
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : str = self.prepare_config_and_inputs()
snake_case__ : Any = True
return config, input_ids, input_mask, token_labels
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Tuple = GPTNeoXModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Union[str, Any] = model(lowerCamelCase , attention_mask=lowerCamelCase )
snake_case__ : Optional[int] = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : List[str] = True
snake_case__ : Optional[Any] = GPTNeoXModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : List[str] = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[Any] = GPTNeoXForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Optional[int] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : str = self.num_labels
snake_case__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : Any = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Dict = self.num_labels
snake_case__ : int = GPTNeoXForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Optional[int] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[int] = self.num_labels
snake_case__ : Dict = GPTNeoXForTokenClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
snake_case__ : int = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = True
snake_case__ : Dict = GPTNeoXForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# first forward pass
snake_case__ : Optional[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase )
snake_case__ : str = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
snake_case__ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : str = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ : Tuple = model(lowerCamelCase , attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase )
snake_case__ : int = output_from_no_past['''hidden_states'''][0]
snake_case__ : str = model(
lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )['''hidden_states'''][0]
# select random slice
snake_case__ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Any = config_and_inputs
snake_case__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Any = GPTNeoXModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=lowerCamelCase , hidden_size=64 , num_attention_heads=8 )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case__ : Any = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowercase__ ( self , lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = ids_tensor([1, 10] , config.vocab_size )
snake_case__ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Dict = GPTNeoXModel(lowerCamelCase )
original_model.to(lowerCamelCase )
original_model.eval()
snake_case__ : Tuple = original_model(lowerCamelCase ).last_hidden_state
snake_case__ : Optional[Any] = original_model(lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : Dict = {'''type''': scaling_type, '''factor''': 10.0}
snake_case__ : Dict = GPTNeoXModel(lowerCamelCase )
scaled_model.to(lowerCamelCase )
scaled_model.eval()
snake_case__ : List[str] = scaled_model(lowerCamelCase ).last_hidden_state
snake_case__ : Optional[int] = scaled_model(lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Any = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
snake_case__ : Any = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase )
snake_case__ : Optional[int] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCamelCase )
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case__ : List[str] = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
snake_case__ : List[str] = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=20 )
snake_case__ : List[str] = tokenizer.batch_decode(lowerCamelCase )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
| 694 |
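The parameterized test above drives the model's RoPE scaling; a hedged configuration sketch (the small dimensions are illustrative and match the tester defaults):

from transformers import GPTNeoXConfig, GPTNeoXModel

# "dynamic" leaves short-input outputs unchanged until the trained length is
# exceeded; "linear" rescales position ids immediately.
config = GPTNeoXConfig(
    hidden_size=64,
    num_attention_heads=8,
    rope_scaling={"type": "dynamic", "factor": 10.0},
)
model = GPTNeoXModel(config)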
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 42
class snake_case ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase = 65536 , lowerCamelCase = None , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 0 , lowerCamelCase = "fourier" , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = 0.0 , lowerCamelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase = "UNetMidBlock1D" , lowerCamelCase = None , lowerCamelCase = (32, 32, 64) , lowerCamelCase = None , lowerCamelCase = 8 , lowerCamelCase = 1 , lowerCamelCase = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
snake_case__ : Optional[int] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase , log=lowerCamelCase , flip_sin_to_cos=lowerCamelCase )
snake_case__ : List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case__ : Dict = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase , downscale_freq_shift=lowerCamelCase )
snake_case__ : Dict = block_out_channels[0]
if use_timestep_embedding:
snake_case__ : Any = block_out_channels[0] * 4
snake_case__ : Optional[Any] = TimestepEmbedding(
in_channels=lowerCamelCase , time_embed_dim=lowerCamelCase , act_fn=lowerCamelCase , out_dim=block_out_channels[0] , )
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : List[Any] = None
snake_case__ : Union[str, Any] = nn.ModuleList([] )
snake_case__ : List[str] = None
# down
snake_case__ : Tuple = in_channels
for i, down_block_type in enumerate(lowerCamelCase ):
snake_case__ : Tuple = output_channel
snake_case__ : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case__ : List[Any] = i == len(lowerCamelCase ) - 1
snake_case__ : Dict = get_down_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase )
# mid
snake_case__ : Optional[int] = get_mid_block(
lowerCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase , add_downsample=lowerCamelCase , )
# up
snake_case__ : Union[str, Any] = list(reversed(lowerCamelCase ) )
snake_case__ : Any = reversed_block_out_channels[0]
if out_block_type is None:
snake_case__ : List[Any] = out_channels
else:
snake_case__ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase ):
snake_case__ : List[str] = output_channel
snake_case__ : List[str] = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase ) - 1 else final_upsample_channels
)
snake_case__ : List[str] = i == len(lowerCamelCase ) - 1
snake_case__ : str = get_up_block(
lowerCamelCase , num_layers=lowerCamelCase , in_channels=lowerCamelCase , out_channels=lowerCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase )
snake_case__ : Optional[Any] = output_channel
# out
snake_case__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
snake_case__ : Union[str, Any] = get_out_block(
out_block_type=lowerCamelCase , num_groups_out=lowerCamelCase , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase , act_fn=lowerCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = True , ) -> Union[UNetaDOutput, Tuple]:
"""simple docstring"""
snake_case__ : str = timestep
if not torch.is_tensor(lowerCamelCase ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase ) and len(timesteps.shape ) == 0:
snake_case__ : Optional[Any] = timesteps[None].to(sample.device )
snake_case__ : Any = self.time_proj(lowerCamelCase )
if self.config.use_timestep_embedding:
snake_case__ : Tuple = self.time_mlp(lowerCamelCase )
else:
snake_case__ : Union[str, Any] = timestep_embed[..., None]
snake_case__ : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
snake_case__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
snake_case__ : List[Any] = ()
for downsample_block in self.down_blocks:
snake_case__ ,snake_case__ : Optional[int] = downsample_block(hidden_states=lowerCamelCase , temb=lowerCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case__ : Any = self.mid_block(lowerCamelCase , lowerCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
snake_case__ : str = down_block_res_samples[-1:]
snake_case__ : int = down_block_res_samples[:-1]
snake_case__ : Optional[Any] = upsample_block(lowerCamelCase , res_hidden_states_tuple=lowerCamelCase , temb=lowerCamelCase )
# 5. post-process
if self.out_block:
snake_case__ : Dict = self.out_block(lowerCamelCase , lowerCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase )
| 694 | 1 |
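A minimal forward-pass sketch for the 1-D UNet above via the public diffusers class; the channel counts and sequence length are illustrative, and extra_in_channels=16 is an assumption matching the 16-channel Fourier time embedding that the no-skip down block concatenates onto the input (as in Dance Diffusion):

import torch
from diffusers import UNet1DModel

model = UNet1DModel(sample_size=2048, in_channels=2, out_channels=2, extra_in_channels=16)
sample = torch.randn(1, 2, 2048)  # (batch, channels, length)
denoised = model(sample, timestep=10).sample  # same shape as the input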
'''simple docstring'''
def _A ( snake_case__ : List[str] ):
snake_case__ : List[Any] = []
snake_case__ : List[Any] = set({'''(''', '''[''', '''{'''} )
snake_case__ : Any = set({''')''', ''']''', '''}'''} )
snake_case__ : Union[str, Any] = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(snake_case__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(snake_case__ ) == 0 or (len(snake_case__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(snake_case__ ) == 0
def _A ( ):
snake_case__ : List[Any] = input('''Enter sequence of brackets: ''' )
if is_balanced(snake_case__ ):
print(snake_case__ , '''is balanced''' )
else:
print(snake_case__ , '''is not balanced''' )
if __name__ == "__main__":
main()
| 694 |
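For reference, an equivalent compact checker and the behaviour the snippet encodes:

def is_balanced(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

assert is_balanced("([]{})")
assert not is_balanced("([)]")  # interleaved pairs
assert not is_balanced("(((")   # unclosed openers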
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "The Nymphenburg Palace is a beautiful palace in Munich!"
def _A ( snake_case__ : str , snake_case__ : str ):
snake_case__ : Tuple = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
snake_case__ : List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
snake_case__ : str = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , snake_case__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
snake_case__ : Any = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
snake_case__ : Any = os.path.join(get_home_dir() , '''models''' )
snake_case__ : List[Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
snake_case__ : Optional[int] = nlp.model.BERTModel(
snake_case__ , len(snake_case__ ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
snake_case__ : Any = original_bort._collect_params_with_prefix()
# Build our config 🤗
snake_case__ : Union[str, Any] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(snake_case__ ),
}
snake_case__ : Dict = BertConfig.from_dict(snake_case__ )
snake_case__ : Dict = BertForMaskedLM(snake_case__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(snake_case__ : str ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(snake_case__ : List[Any] , snake_case__ : Any ):
snake_case__ : Union[str, Any] = hf_param.shape
snake_case__ : Any = to_torch(params[gluon_param] )
snake_case__ : Dict = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
snake_case__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' )
snake_case__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' )
snake_case__ : str = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' )
snake_case__ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
snake_case__ : str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
snake_case__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
snake_case__ : BertSelfAttention = layer.attention.self
snake_case__ : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
snake_case__ : Dict = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
snake_case__ : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
snake_case__ : int = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
snake_case__ : List[Any] = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
snake_case__ : List[Any] = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
snake_case__ : BertSelfOutput = layer.attention.output
snake_case__ : Optional[Any] = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
snake_case__ : List[str] = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
snake_case__ : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
snake_case__ : Any = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
snake_case__ : BertIntermediate = layer.intermediate
snake_case__ : int = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
snake_case__ : Optional[int] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
snake_case__ : BertOutput = layer.output
snake_case__ : Any = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
snake_case__ : Tuple = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
snake_case__ : Tuple = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
snake_case__ : Union[str, Any] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
snake_case__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' )
snake_case__ : str = tokenizer.encode_plus(snake_case__ )['''input_ids''']
# Get gluon output
snake_case__ : List[str] = mx.nd.array([input_ids] )
snake_case__ : Optional[int] = original_bort(inputs=snake_case__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(snake_case__ )
snake_case__ : Optional[Any] = BertModel.from_pretrained(snake_case__ )
hf_bort_model.eval()
snake_case__ : Optional[Any] = tokenizer.encode_plus(snake_case__ , return_tensors='''pt''' )
snake_case__ : str = hf_bort_model(**snake_case__ )[0]
snake_case__ : str = output_gluon[0].asnumpy()
snake_case__ : str = output_hf[0].detach().numpy()
snake_case__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item()
snake_case__ : Optional[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
if success:
        print('''✔️ Both models output the same tensors''' )
else:
        print('''❌ Both models do **NOT** output the same tensors''' )
print('''Absolute difference is:''' , snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 694 | 1 |
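The shape-checked copy in `check_and_map_params` follows a generic checkpoint-conversion pattern; a framework-agnostic sketch with illustrative names:

import numpy as np
import torch
from torch import nn

def copy_param(hf_param: nn.Parameter, source_array: np.ndarray) -> nn.Parameter:
    # Refuse silently-broadcasting copies: shapes must match exactly.
    assert tuple(hf_param.shape) == source_array.shape, (
        f"shape mismatch: {source_array.shape} vs {tuple(hf_param.shape)}"
    )
    return nn.Parameter(torch.from_numpy(source_array).float())

linear = nn.Linear(4, 3)
weights = np.random.rand(3, 4).astype(np.float32)
linear.weight = copy_param(linear.weight, weights)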
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowerCAmelCase : Tuple = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
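`_LazyModule` defers the real import until first attribute access; a toy equivalent for a package `__init__.py`, using module-level `__getattr__` (PEP 562):

import importlib

_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the owning submodule only when one of its names is first requested.
    module = _attr_to_module.get(name)
    if module is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module(f".{module}", __name__), name)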
'''simple docstring'''
def _A ( snake_case__ : int = 4_00_00_00 ):
snake_case__ : int = []
snake_case__ ,snake_case__ : Union[str, Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(snake_case__ )
snake_case__ ,snake_case__ : Any = b, a + b
return sum(snake_case__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 694 | 1 |
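A runnable, de-obfuscated version of the even-Fibonacci sum (the undefined `n` above is the limit parameter):

def solution(n: int = 4_000_000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)

if __name__ == "__main__":
    print(f"{solution() = }")  # solution() = 4613732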
'''simple docstring'''
import socket
def _A ( ):
snake_case__ : Any = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case__ : str = socket.gethostname()
snake_case__ : Union[str, Any] = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
snake_case__ : int = sock.recv(10_24 )
if not data:
break
out_file.write(snake_case__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 694 |
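A minimal companion server for the client above; the port must match the client's, and the filename to send is illustrative:

import socket

def serve_file(filename: str = "file_to_send.bin", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.sendall(chunk)
    conn.close()
    server.close()

if __name__ == "__main__":
    serve_file()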
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = "▁"
_lowerCAmelCase : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase : int = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_lowerCAmelCase : Optional[int] = {
"google/pegasus-xsum": 5_1_2,
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PegasusTokenizer
_lowerCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<pad>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<mask_2>" , lowerCamelCase="<mask_1>" , lowerCamelCase=None , lowerCamelCase=103 , **lowerCamelCase , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Tuple = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
f''' {type(lowerCamelCase )}''' )
snake_case__ : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowerCamelCase ) , self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case__ : List[Any] = additional_special_tokens_extended
else:
snake_case__ : Union[str, Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , pad_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , mask_token=lowerCamelCase , mask_token_sent=lowerCamelCase , offset=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Union[str, Any] = vocab_file
snake_case__ : List[Any] = False if not self.vocab_file else True
def lowercase__ ( self , lowerCamelCase ) -> List[str]:
"""simple docstring"""
snake_case__ : Tuple = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : int = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
| 694 | 1 |
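The `<unk_i>` padding in `__init__` reserves the first `offset` ids for Pegasus' mask/sentinel tokens; a pure-Python sketch of that fill logic (function name is illustrative):

def fill_additional_tokens(extra_tokens, mask_token_sent="<mask_1>", offset=103):
    tokens = [mask_token_sent] + list(extra_tokens)
    # Pad with <unk_2> ... <unk_{offset-2}> until all reserved slots are named.
    tokens += [f"<unk_{i}>" for i in range(len(tokens), offset - 1)]
    return tokens

toks = fill_additional_tokens(["<mask_2>"])
print(len(toks), toks[:4])  # 102 ['<mask_1>', '<mask_2>', '<unk_2>', '<unk_3>']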
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
_lowerCAmelCase : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_lowerCAmelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_lowerCAmelCase : List[str] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 , lowerCamelCase="binary" , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = fa_score(
lowerCamelCase , lowerCamelCase , labels=lowerCamelCase , pos_label=lowerCamelCase , average=lowerCamelCase , sample_weight=lowerCamelCase )
return {"f1": float(lowerCamelCase ) if score.size == 1 else score}
| 694 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=0 ) -> Tuple:
"""simple docstring"""
snake_case__ : Optional[Any] = 1.0 if scale is None else scale
snake_case__ : Dict = 0.0 if loc is None else loc
super().__init__(lowerCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase )] )
@property
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
return self.variance.sqrt()
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
snake_case__ : Tuple = args_dim
snake_case__ : str = nn.ModuleList([nn.Linear(lowerCamelCase , lowerCamelCase ) for dim in args_dim.values()] )
snake_case__ : Optional[int] = domain_map
def lowercase__ ( self , lowerCamelCase ) -> Tuple[torch.Tensor]:
"""simple docstring"""
snake_case__ : Any = [proj(lowerCamelCase ) for proj in self.proj]
return self.domain_map(*lowerCamelCase )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
snake_case__ : Tuple = function
def lowercase__ ( self , lowerCamelCase , *lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.function(lowerCamelCase , *lowerCamelCase )
class snake_case :
"""simple docstring"""
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
def __init__( self , lowerCamelCase = 1 ) -> None:
"""simple docstring"""
snake_case__ : Optional[Any] = dim
snake_case__ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowercase__ ( self , lowerCamelCase ) -> int:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowerCamelCase )
else:
return Independent(self.distribution_class(*lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ) -> Distribution:
"""simple docstring"""
snake_case__ : List[Any] = self._base_distribution(lowerCamelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase , loc=lowerCamelCase , scale=lowerCamelCase , event_dim=self.event_dim )
@property
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def lowercase__ ( self ) -> float:
"""simple docstring"""
return 0.0
def lowercase__ ( self , lowerCamelCase ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowercase__ ( self , *lowerCamelCase ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def lowercase__ ( lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowerCamelCase ) + 4.0 )) / 2.0
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCAmelCase = StudentT
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
"""simple docstring"""
snake_case__ : Tuple = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
snake_case__ : Optional[int] = 2.0 + cls.squareplus(lowerCamelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"loc": 1, "scale": 1}
_lowerCAmelCase = Normal
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = {"total_count": 1, "logits": 1}
_lowerCAmelCase = NegativeBinomial
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[str] = cls.squareplus(lowerCamelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowercase__ ( self , lowerCamelCase ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : str = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase , logits=lowerCamelCase ) , 1 )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ) -> Distribution:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 694 | 1 |
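
Two numerical details in the distribution-output classes above are worth isolating: squareplus maps any real network output to a strictly positive parameter (a smooth alternative to softplus), and the `logits += scale.log()` step works because a NegativeBinomial's mean equals total_count * exp(logits), so shifting the logits by log(scale) multiplies the mean by scale. A minimal runnable sketch of the first trick (the standalone function name is mine, mirroring the static method above):

import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    # (x + sqrt(x^2 + 4)) / 2: strictly positive, approaches x for large x
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

print(squareplus(torch.tensor([-5.0, 0.0, 5.0])))
# tensor([0.1926, 1.0000, 5.1926]), all valid scale/df values
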
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_lowerCAmelCase : Optional[Any] = "docs/source/en/_toctree.yml"
def _A ( snake_case__ : Union[str, Any] ):
snake_case__ : int = defaultdict(snake_case__ )
snake_case__ : Dict = []
snake_case__ : Optional[Any] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(snake_case__ )
snake_case__ : Any = new_doc_list
snake_case__ : Tuple = [key for key, value in counts.items() if value > 1]
snake_case__ : List[str] = []
for duplicate_key in duplicates:
snake_case__ : Union[str, Any] = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(snake_case__ ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
snake_case__ : List[str] = sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(snake_case__ ) > 1:
        raise ValueError(f'''{doc_list} has two \'overview\' docs which is not allowed.''' )
overview_doc.extend(snake_case__ )
# Sort
return overview_doc
def _A ( snake_case__ : str=False ):
with open(snake_case__ , encoding='''utf-8''' ) as f:
snake_case__ : Tuple = yaml.safe_load(f.read() )
# Get to the API doc
snake_case__ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case__ : str = content[api_idx]['''sections''']
    # Then to the scheduler doc
snake_case__ : Any = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
snake_case__ : str = api_doc[scheduler_idx]['''sections''']
snake_case__ : Optional[Any] = clean_doc_toc(snake_case__ )
snake_case__ : Optional[int] = False
if new_scheduler_doc != scheduler_doc:
snake_case__ : str = True
if overwrite:
snake_case__ : Dict = new_scheduler_doc
if diff:
if overwrite:
snake_case__ : str = api_doc
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) )
else:
raise ValueError(
                '''The scheduler doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
def _A ( snake_case__ : List[Any]=False ):
with open(snake_case__ , encoding='''utf-8''' ) as f:
snake_case__ : List[str] = yaml.safe_load(f.read() )
# Get to the API doc
snake_case__ : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case__ : Any = content[api_idx]['''sections''']
    # Then to the pipeline doc
snake_case__ : Union[str, Any] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
snake_case__ : int = False
snake_case__ : Dict = api_doc[pipeline_idx]['''sections''']
snake_case__ : str = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
snake_case__ : Optional[int] = pipeline_doc['''section''']
snake_case__ : Union[str, Any] = clean_doc_toc(snake_case__ )
if overwrite:
snake_case__ : Dict = new_sub_pipeline_doc
new_pipeline_docs.append(snake_case__ )
# sort overall pipeline doc
snake_case__ : Dict = clean_doc_toc(snake_case__ )
if new_pipeline_docs != pipeline_docs:
snake_case__ : Optional[Any] = True
if overwrite:
snake_case__ : Dict = new_pipeline_docs
if diff:
if overwrite:
snake_case__ : List[str] = api_doc
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) )
else:
raise ValueError(
                '''The pipeline doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_lowerCAmelCase : str = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 694 |
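
The ordering rules that clean_doc_toc enforces can be shown on a toy list: titles sort case-insensitively, and any "overview" entry is pinned first. A small illustration with made-up entries:

docs = [
    {"local": "zebra", "title": "Zebra"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "overview", "title": "Overview"},
]
overview = [d for d in docs if d["title"].lower() == "overview"]
rest = sorted(
    (d for d in docs if d["title"].lower() != "overview"),
    key=lambda s: s["title"].lower(),
)
print([d["local"] for d in overview + rest])  # ['overview', 'ddim', 'zebra']
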
'''simple docstring'''
from math import factorial
def _A ( snake_case__ : int = 20 ):
snake_case__ : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case__ : Union[str, Any] = n // 2
return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
_lowerCAmelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 694 | 1 |
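
The function above computes the central binomial coefficient C(2n, n), i.e. the middle entry of row 2n of Pascal's triangle and the number of lattice paths through an n x n grid. A cleaner integer-arithmetic variant with a worked check:

from math import factorial

def central_binomial(n: int = 20) -> int:
    # C(2n, n) = (2n)! / (n!)^2; integer division avoids float round-off
    return factorial(2 * n) // (factorial(n) ** 2)

assert [central_binomial(i) for i in (1, 2, 3)] == [2, 6, 20]
print(central_binomial(20))  # 137846528820
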
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class snake_case :
"""simple docstring"""
_lowerCAmelCase = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'The column name of the images in the files.'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'A folder containing the training data.'} )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'A folder containing the validation data.'} )
_lowerCAmelCase = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[int] = {}
if self.train_dir is not None:
snake_case__ : int = self.train_dir
if self.validation_dir is not None:
snake_case__ : Optional[Any] = self.validation_dir
snake_case__ : Optional[Any] = data_files if data_files else None
@dataclass
class snake_case :
"""simple docstring"""
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_lowerCAmelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCAmelCase = field(default=__lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCAmelCase = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
_lowerCAmelCase = field(
default=__lowerCamelCase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = field(
default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def _A ( snake_case__ : List[Any] ):
snake_case__ : Dict = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _A ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ ,snake_case__ ,snake_case__ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ ,snake_case__ ,snake_case__ : Any = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case__ : List[str] = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case__ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
snake_case__ : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case__ : Optional[Any] = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
snake_case__ : Optional[Any] = ds['''train'''].train_test_split(data_args.train_val_split )
snake_case__ : Optional[Any] = split['''train''']
snake_case__ : int = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : Union[str, Any] = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case__ : Optional[int] = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case__ )
elif model_args.model_name_or_path:
snake_case__ : List[Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
snake_case__ : Any = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
snake_case__ : Optional[int] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case__ )
elif model_args.model_name_or_path:
snake_case__ : Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case__ )
else:
snake_case__ : Union[str, Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
snake_case__ : List[str] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
snake_case__ : str = ViTMAEForPreTraining(snake_case__ )
if training_args.do_train:
snake_case__ : Optional[int] = ds['''train'''].column_names
else:
snake_case__ : int = ds['''validation'''].column_names
if data_args.image_column_name is not None:
snake_case__ : Tuple = data_args.image_column_name
elif "image" in column_names:
snake_case__ : Tuple = '''image'''
elif "img" in column_names:
snake_case__ : Dict = '''img'''
else:
snake_case__ : List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
snake_case__ : List[Any] = image_processor.size['''shortest_edge''']
else:
snake_case__ : Optional[int] = (image_processor.size['''height'''], image_processor.size['''width'''])
snake_case__ : List[str] = Compose(
[
Lambda(lambda snake_case__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(snake_case__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(snake_case__ : Optional[int] ):
snake_case__ : Optional[Any] = [transforms(snake_case__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
snake_case__ : Optional[int] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
snake_case__ : str = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case__ )
# Compute absolute learning rate
snake_case__ : Dict = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
snake_case__ : Dict = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
snake_case__ : List[str] = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , )
# Training
if training_args.do_train:
snake_case__ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
snake_case__ : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case__ : int = last_checkpoint
snake_case__ : str = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case__ : Dict = trainer.evaluate()
trainer.log_metrics('''eval''' , snake_case__ )
trainer.save_metrics('''eval''' , snake_case__ )
# Write model card and (optionally) push to hub
snake_case__ : Dict = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def _A ( snake_case__ : List[str] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 694 |
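
The absolute-learning-rate computation near the end of the script follows the linear scaling rule absolute_lr = base_lr * total_batch_size / 256. A worked example; the base rate is the script's default while the batch settings are illustrative:

base_learning_rate = 1e-3        # CustomTrainingArguments default above
per_device_batch_size = 64       # assumed
gradient_accumulation_steps = 2  # assumed
world_size = 4                   # assumed, e.g. 4 GPUs

total_train_batch_size = per_device_batch_size * gradient_accumulation_steps * world_size  # 512
print(base_learning_rate * total_train_batch_size / 256)  # 0.002
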
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = (EulerDiscreteScheduler,)
_lowerCAmelCase = 1_0
def lowercase__ ( self , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : Any = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCamelCase )
return config
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : int = model(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Tuple = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Optional[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ : Optional[int] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : List[str] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Union[str, Any] = output.prev_sample
snake_case__ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : List[str] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_002 ) < 1E-2
assert abs(result_mean.item() - 2.2_6_7_6E-0_6 ) < 1E-3
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Optional[int] = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Tuple = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : List[str] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model(lowerCamelCase , lowerCamelCase )
snake_case__ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : int = output.prev_sample
snake_case__ : Union[str, Any] = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : int = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 10.0_807 ) < 1E-2
assert abs(result_mean.item() - 0.0_131 ) < 1E-3
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = self.dummy_model()
snake_case__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ : Optional[Any] = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
snake_case__ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
snake_case__ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase )
snake_case__ : Optional[int] = output.prev_sample
snake_case__ : Dict = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
| 694 | 1 |
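
Every test above exercises the same three-step denoising loop: scale the sample for the current sigma, predict noise, then step the scheduler. A minimal sketch of that pattern outside the test harness; the zero tensor stands in for a real model call, and the shapes are illustrative:

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
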
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_lowerCAmelCase : Dict = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_lowerCAmelCase : str = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_lowerCAmelCase : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_lowerCAmelCase : List[Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def lowercase__ ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=0.9 , lowerCamelCase=3 , lowerCamelCase=0.5 ) -> Tuple:
"""simple docstring"""
if NLTK_VERSION >= version.Version('''3.6.5''' ):
snake_case__ : int = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase ) , word_tokenize(lowerCamelCase ) , alpha=lowerCamelCase , beta=lowerCamelCase , gamma=lowerCamelCase )
for ref, pred in zip(lowerCamelCase , lowerCamelCase )
]
else:
snake_case__ : str = [
meteor_score.single_meteor_score(lowerCamelCase , lowerCamelCase , alpha=lowerCamelCase , beta=lowerCamelCase , gamma=lowerCamelCase )
for ref, pred in zip(lowerCamelCase , lowerCamelCase )
]
return {"meteor": np.mean(lowerCamelCase )}
| 694 |
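
For comparison, this is the nltk call the metric wraps on its modern code path (nltk >= 3.6.5), assuming the wordnet/punkt/omw-1.4 resources are already downloaded; the sentences are the ones from the docstring example:

from nltk import word_tokenize
from nltk.translate import meteor_score

pred = "It is a guide to action which ensures that the military always obeys the commands of the party"
ref = "It is a guide to action that ensures that the military will forever heed Party commands"
score = meteor_score.single_meteor_score(
    word_tokenize(ref), word_tokenize(pred), alpha=0.9, beta=3, gamma=0.5
)
print(round(score, 4))  # ~0.6944, matching the docstring example
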
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = ['pixel_values']
def __init__( self , lowerCamelCase = True , lowerCamelCase = 32 , lowerCamelCase=PILImageResampling.BILINEAR , lowerCamelCase = True , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : int = do_resize
snake_case__ : Dict = do_rescale
snake_case__ : Any = size_divisor
snake_case__ : str = resample
super().__init__(**lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
snake_case__ ,snake_case__ : Any = get_image_size(lowerCamelCase )
# Rounds the height and width down to the closest multiple of size_divisor
snake_case__ : Any = height // size_divisor * size_divisor
snake_case__ : Union[str, Any] = width // size_divisor * size_divisor
snake_case__ : Tuple = resize(lowerCamelCase , (new_h, new_w) , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
return image
def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return rescale(image=lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase=None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> BatchFeature:
"""simple docstring"""
snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = size_divisor if size_divisor is not None else self.size_divisor
snake_case__ : Dict = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
snake_case__ : Optional[Any] = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
snake_case__ : Optional[int] = [to_numpy_array(lowerCamelCase ) for img in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(lowerCamelCase , size_divisor=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(lowerCamelCase , scale=1 / 255 ) for image in images]
snake_case__ : Tuple = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
snake_case__ : str = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 694 | 1 |
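
The resize step above floors both dimensions to the nearest multiple of size_divisor, which keeps feature maps aligned across downsampling stages. Worked with illustrative numbers:

height, width, size_divisor = 480, 641, 32
new_h = height // size_divisor * size_divisor  # 480 (already a multiple of 32)
new_w = width // size_divisor * size_divisor   # 640 (floored from 641)
print(new_h, new_w)
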
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'poolformer'
def __init__( self , lowerCamelCase=3 , lowerCamelCase=16 , lowerCamelCase=16 , lowerCamelCase=3 , lowerCamelCase=4.0 , lowerCamelCase=[2, 2, 6, 2] , lowerCamelCase=[64, 128, 320, 512] , lowerCamelCase=[7, 3, 3, 3] , lowerCamelCase=[4, 2, 2, 2] , lowerCamelCase=[2, 1, 1, 1] , lowerCamelCase=4 , lowerCamelCase=0.0 , lowerCamelCase="gelu" , lowerCamelCase=True , lowerCamelCase=1E-5 , lowerCamelCase=0.02 , **lowerCamelCase , ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[str] = num_channels
snake_case__ : Any = patch_size
snake_case__ : Dict = stride
snake_case__ : int = padding
snake_case__ : Any = pool_size
snake_case__ : List[str] = hidden_sizes
snake_case__ : str = mlp_ratio
snake_case__ : Optional[Any] = depths
snake_case__ : Union[str, Any] = patch_sizes
snake_case__ : int = strides
snake_case__ : Dict = num_encoder_blocks
snake_case__ : Any = drop_path_rate
snake_case__ : Dict = hidden_act
snake_case__ : Tuple = use_layer_scale
snake_case__ : Dict = layer_scale_init_value
snake_case__ : Tuple = initializer_range
super().__init__(**lowerCamelCase )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def lowercase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self ) -> float:
"""simple docstring"""
return 2E-3
| 694 |
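
The constructor defaults above mirror the sail/poolformer_s12 checkpoint: four stages whose depths and hidden sizes line up index by index. A quick sanity check, as a sketch assuming transformers' PoolFormerConfig:

from transformers import PoolFormerConfig

config = PoolFormerConfig()
print(config.depths, config.hidden_sizes)  # [2, 2, 6, 2] [64, 128, 320, 512]
print(config.num_encoder_blocks)           # 4
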
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def _A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
snake_case__ : List[Any] = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}'''
| 694 | 1 |
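
The parametrized "filename with blanks.csv" case exists because the expected URL percent-encodes the path; quote() is what makes the two sides of the assert agree:

from urllib.parse import quote

print(quote("filename with blanks.csv"))  # filename%20with%20blanks.csv
print(f"https://huggingface.co/datasets/org-name/dataset-name/resolve/main/{quote('filename.csv')}")
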
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowerCAmelCase : List[str] = "examples/"
_lowerCAmelCase : str = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
_lowerCAmelCase : Union[str, Any] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
_lowerCAmelCase : Dict = "README.md"
def _A ( snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str ):
with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ : Any = f.read()
snake_case__ ,snake_case__ : int = REPLACE_PATTERNS[pattern]
snake_case__ : str = replace.replace('''VERSION''' , snake_case__ )
snake_case__ : Any = re_pattern.sub(snake_case__ , snake_case__ )
with open(snake_case__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(snake_case__ )
def _A ( snake_case__ : Any ):
for folder, directories, fnames in os.walk(snake_case__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='''examples''' )
def _A ( snake_case__ : List[Any] , snake_case__ : Dict=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case__ , snake_case__ , snake_case__ )
if not patch:
update_version_in_examples(snake_case__ )
def _A ( ):
snake_case__ : List[Any] = '''🤗 Transformers currently provides the following architectures'''
snake_case__ : Optional[Any] = '''1. Want to contribute a new model?'''
with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ : Optional[Any] = f.readlines()
# Find the start of the list.
snake_case__ : Union[str, Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case__ : List[str] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
snake_case__ : List[str] = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(snake_case__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(snake_case__ )
def _A ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
snake_case__ : Dict = f.read()
snake_case__ : Any = REPLACE_PATTERNS['''init'''][0].search(snake_case__ ).groups()[0]
return packaging.version.parse(snake_case__ )
def _A ( snake_case__ : List[str]=False ):
snake_case__ : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
snake_case__ : Dict = default_version.base_version
elif patch:
snake_case__ : Optional[Any] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
snake_case__ : Union[str, Any] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
snake_case__ : Union[str, Any] = input(f'''Which version are you releasing? [{default_version}]''' )
if len(snake_case__ ) == 0:
snake_case__ : Tuple = default_version
print(f'''Updating version to {version}.''' )
global_version_update(snake_case__ , patch=snake_case__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _A ( ):
snake_case__ : Optional[Any] = get_version()
snake_case__ : int = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
snake_case__ : Any = current_version.base_version
# Check with the user we got that right.
snake_case__ : int = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(snake_case__ ) == 0:
snake_case__ : List[Any] = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(snake_case__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
_lowerCAmelCase : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 694 |
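
Each entry in REPLACE_PATTERNS pairs a regex that finds the current version string with a template whose VERSION placeholder gets substituted. A toy demonstration of that mechanism on the "init" pattern, with an assumed version string:

import re

re_pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
replace = "__version__ = \"VERSION\"\n".replace("VERSION", "4.31.0")
print(re_pattern.sub(replace, "__version__ = \"4.31.0.dev0\"\n"))
# __version__ = "4.31.0"
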
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def _A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
snake_case__ : Optional[Any] = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 1 |
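
A readable, runnable restatement of the solver above with worked calls; the function name is mine, and the negative-power guard is omitted for brevity:

from collections import namedtuple

result = namedtuple("result", "name value")

def electric_power(voltage: float, current: float, power: float) -> result:
    # Exactly one of the three quantities must be 0; it is derived via P = V * I.
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    if voltage == 0:
        return result("voltage", power / current)
    if current == 0:
        return result("current", power / voltage)
    return result("power", float(round(abs(voltage * current), 2)))

print(electric_power(voltage=0, current=2, power=4))    # result(name='voltage', value=2.0)
print(electric_power(voltage=2, current=1.5, power=0))  # result(name='power', value=3.0)
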