'''simple docstring'''
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients `poly` at `x` by direct summation."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial at `x` with Horner's method: fold the coefficients
    from highest to lowest degree, so only one multiplication is needed per term."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
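    # Sanity check (added for illustration, not part of the original snippet):
    # Horner's rule is an algebraic refactoring of the power-form sum, so the
    # two strategies must agree on every input, up to floating-point rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9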
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
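# Note (illustrative, not part of the original file): with this pattern, importing
# the package only builds `_import_structure`; `_LazyModule` then resolves a name
# such as `Data2VecTextConfig` on first attribute access, so the heavy torch/TF
# submodules are imported only when they are actually used.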
'''simple docstring'''
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        # the boolean flags below were lost in the source and are restored to the
        # values used by the upstream diffusers test for this pipeline
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig

if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
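# Illustrative usage sketch (an assumption, not from the original module): composing
# an image-encoder config with an autoregressive decoder config. Any two
# `PretrainedConfig` subclasses could stand in here.
# from transformers import ViTConfig, GPT2Config
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
# assert config.is_encoder_decoder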
'''simple docstring'''
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)

FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"],
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    """simple docstring"""
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    """simple docstring"""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
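# Quick round-trip sketch for the JSON helpers above (illustrative only; the
# file name is hypothetical and not part of the original module):
# vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# save_json(vocab, "vocab.json")
# assert load_json("vocab.json") == vocab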
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 344 | 1 |
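The `OnnxConfig` subclass above only declares which axes of `input_ids` and `attention_mask` are dynamic. As a rough illustration of where that mapping ends up, here is how a dynamic-axes dict of the same shape can be passed to `torch.onnx.export`; the checkpoint, output name, and opset here are illustrative choices, not taken from the exporter itself:

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModel.from_pretrained("distilbert-base-uncased").eval()

inputs = tokenizer("hello world", return_tensors="pt")
dynamic_axes = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
    "last_hidden_state": {0: "batch", 1: "sequence"},
}
torch.onnx.export(
    model,
    (inputs["input_ids"], inputs["attention_mask"]),
    "distilbert.onnx",
    input_names=["input_ids", "attention_mask"],
    output_names=["last_hidden_state"],
    dynamic_axes=dynamic_axes,
    opset_version=14,
)
```

Marking batch and sequence axes dynamic is what lets the exported graph accept arbitrary batch sizes and sequence lengths at inference time.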
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
UpperCamelCase__ : Union[str, Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase__ : Optional[int] = 'sshleifer/student_marian_en_ro_6_1'
UpperCamelCase__ : Tuple = 'sshleifer/tiny-mbart'
@require_torch
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _lowerCamelCase=False , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , ) -> List[Any]:
A_ : List[str] = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowerCamelCase , num_train_epochs=1 , distributed=_lowerCamelCase , extra_args_str=_lowerCamelCase , predict_with_generate=_lowerCamelCase , do_train=_lowerCamelCase , do_eval=_lowerCamelCase , do_predict=_lowerCamelCase , )
A_ : List[str] = TrainerState.load_from_json(os.path.join(_lowerCamelCase , """trainer_state.json""" ) ).log_history
if not do_eval:
return
A_ : List[Any] = [log for log in logs if """eval_loss""" in log.keys()]
A_ : List[str] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A_ : Optional[int] = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , _lowerCamelCase )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def UpperCAmelCase_ ( self ) -> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=_lowerCamelCase )
@require_torch_multi_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
self.run_seqaseq_quick(distributed=_lowerCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase_ ( self ) -> int:
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=_lowerCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def UpperCAmelCase_ ( self ) -> Tuple:
self.run_seqaseq_quick(
distributed=_lowerCamelCase , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=_lowerCamelCase )
@require_apex
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> str:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Union[str, Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
A_ : Optional[Any] = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
A_ : Dict = experiments[experiment_id]
A_ : Union[str, Any] = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
A_ : List[str] = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowerCamelCase , extra_args_str=data["""extra_args_str"""] )
A_ : List[str] = len(re.findall(_lowerCamelCase , cl.err ) )
self.assertEqual(_lowerCamelCase , data["""n_matches"""] )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_lowerCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=_lowerCamelCase , )
# Check metrics
A_ : Tuple = TrainerState.load_from_json(os.path.join(_lowerCamelCase , """trainer_state.json""" ) ).log_history
A_ : int = [log for log in logs if """eval_loss""" in log.keys()]
A_ : Tuple = eval_metrics[0]
A_ : str = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , _lowerCamelCase )
# test if do_predict saves generations and metrics
A_ : str = os.listdir(_lowerCamelCase )
        A_ : int = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowerCamelCase ) -> Tuple[int, float]:
A_ : int = """--skip_memory_metrics 0"""
A_ : Optional[Any] = self.run_trainer(
max_len=128 , model_name=_lowerCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=_lowerCamelCase , distributed=_lowerCamelCase , extra_args_str=_lowerCamelCase , do_eval=_lowerCamelCase , do_predict=_lowerCamelCase , n_gpus_to_use=1 , )
# Check metrics
A_ : List[str] = TrainerState.load_from_json(Path(_lowerCamelCase , """trainer_state.json""" ) ).log_history
A_ : Dict = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
A_ : List[str] = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
A_ : int = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
A_ , A_ , A_ : str = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
A_ , A_ , A_ : str = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
A_ : Optional[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
A_ : Dict = gpu_peak_mem_orig + gpu_alloc_mem_orig
A_ : Dict = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
A_ : int = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
A_ : Any = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowerCamelCase , _lowerCamelCase , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_lowerCamelCase , _lowerCamelCase , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_lowerCamelCase , _lowerCamelCase , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 3e-3 , _lowerCamelCase = "adafactor" , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 0 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = None , ) -> List[str]:
A_ : Tuple = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
A_ : str = self.get_auto_remove_tmp_dir()
A_ : int = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowerCamelCase )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowerCamelCase )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
A_ : Any = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowerCamelCase )}\n ".split()
A_ : int = """
--do_predict
""".split()
A_ : Union[str, Any] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
A_ : str = get_gpu_count()
A_ : Dict = get_torch_dist_unique_port()
A_ : Union[str, Any] = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
A_ : int = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowerCamelCase , env=self.get_env() )
else:
A_ : Dict = ["""run_translation.py"""] + args
with patch.object(_lowerCamelCase , """argv""" , _lowerCamelCase ):
main()
return output_dir
| 344 |
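The bitsandbytes test above hinges on the optimizer-state arithmetic spelled out in its comments. A short sketch of that back-of-the-envelope calculation (the 25M/54M parameter counts come from those comments; the rest is plain math):

```python
# Adam keeps two fp32 moments per parameter (8 bytes); bnb's 8-bit Adam keeps
# two int8 moments (2 bytes). The nn.Embedding weights stay un-quantized.
quantized_params = 25_000_000   # 54M total minus ~29M of embeddings
bytes_adamw, bytes_bnb = 8, 2

mb = lambda n_bytes: n_bytes / 2**20
saving_mb = mb(quantized_params * (bytes_adamw - bytes_bnb))
print(f"expected optimizer-state saving: ~{saving_mb:.0f}MB")  # ~143MB

# The test comments round this to ~150MB, then assert only a 120MB margin
# to absorb allocator and GPU-to-GPU variation.
```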
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = state_dict.pop(a_ )
A_ : Tuple = val
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A_ : str = value
else:
A_ : int = value
return new_state_dict
def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]:
"""simple docstring"""
A_ : List[Any] = """"""
if is_panoptic:
A_ : Any = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[Any] = in_proj_weight[:2_5_6, :]
A_ : Tuple = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : int = in_proj_bias[2_5_6:5_1_2]
A_ : int = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : List[Any] = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
    A_ : str = {int(k ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 344 | 1 |
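`read_in_q_k_v` in the conversion script slices PyTorch's fused `in_proj_weight`/`in_proj_bias` into separate query, key and value projections. A standalone sketch of exactly that slicing, using the hidden size of 256 hard-coded in the script:

```python
import torch

hidden_size = 256
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v]
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]

q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : 2 * hidden_size]
v_b = in_proj_bias[-hidden_size:]

# The three slices exactly partition the fused matrix.
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)
```

PyTorch's `nn.MultiheadAttention` stores q/k/v as one stacked matrix, while the HF implementation keeps three separate `nn.Linear` layers, which is the whole reason this special-casing exists.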
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 32 , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowerCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowerCamelCase = True , _lowerCamelCase=7 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=3 , ) -> Union[str, Any]:
A_ : Optional[int] = parent
A_ : Union[str, Any] = do_resize
A_ : Optional[Any] = size if size is not None else {"""shortest_edge""": 288}
A_ : Tuple = size_divisor
A_ : List[Any] = do_rescale
A_ : Dict = rescale_factor
A_ : List[Any] = do_normalize
A_ : Dict = do_center_crop
A_ : Optional[Any] = image_mean
A_ : List[str] = image_std
A_ : str = do_pad
A_ : Any = batch_size
A_ : List[str] = num_channels
A_ : List[str] = min_resolution
A_ : Union[str, Any] = max_resolution
def UpperCAmelCase_ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
if not batched:
A_ : Union[str, Any] = self.size["""shortest_edge"""]
A_ : Dict = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
A_ , A_ : Optional[Any] = image.size
else:
A_ , A_ : int = image.shape[1], image.shape[2]
A_ : Optional[int] = size / min(_lowerCamelCase , _lowerCamelCase )
if h < w:
A_ , A_ : Optional[Any] = size, scale * w
else:
A_ , A_ : Dict = scale * h, size
A_ : Union[str, Any] = int((1333 / 800) * size )
if max(_lowerCamelCase , _lowerCamelCase ) > max_size:
A_ : str = max_size / max(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = newh * scale
A_ : Dict = neww * scale
A_ , A_ : str = int(newh + 0.5 ), int(neww + 0.5 )
A_ , A_ : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            A_ : List[Any] = max(_lowerCamelCase , key=lambda item : item[0] )[0]
            A_ : Tuple = max(_lowerCamelCase , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : int = BridgeTowerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image processor
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image processor
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 |
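`get_expected_values` above mirrors the shortest-edge resize the image processor applies: scale the short side to `size`, cap the long side at `1333/800 * size`, round, then floor both sides to a multiple of `size_divisor`. The same arithmetic as a standalone function, assuming the tester's defaults:

```python
def expected_size(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple[int, int]:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)  # cap on the long side
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)  # round to nearest int
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_size(480, 640))  # (288, 384)
```

Flooring to a multiple of `size_divisor` matters because the vision backbone patchifies the image and needs both sides divisible by the patch stride.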
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = (UnCLIPScheduler,)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]:
A_ : Union[str, Any] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
A_ : Dict = scheduler_class(**_lowerCamelCase )
A_ : Dict = 0.5
        assert torch.sum(torch.abs(scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Any = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[Any] = pred_prev_sample
A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(25 )
A_ : List[str] = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
if i + 1 == timesteps.shape[0]:
A_ : List[str] = None
else:
A_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Optional[Any] = pred_prev_sample
A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
| 344 | 1 |
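The scheduler tests step through the denoising loop by hand, chaining `prev_timestep` explicitly when the 1000 training steps are subsampled to 25. The same control flow with the public diffusers API, with the UNet faked by random noise purely to show the loop structure:

```python
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)
timesteps = scheduler.timesteps

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32)

for i, t in enumerate(timesteps):
    residual = torch.randn(1, 3, 32, 32)  # stand-in for a UNet's noise prediction
    # Pass the next subsampled timestep so the variance is computed over the
    # actual gap, not over t - 1.
    prev_t = None if i + 1 == timesteps.shape[0] else timesteps[i + 1]
    sample = scheduler.step(residual, t, sample, prev_timestep=prev_t, generator=generator).prev_sample

print(sample.shape)  # torch.Size([1, 3, 32, 32])
```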
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BioGptTokenizer
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A_ : Any = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
A_ : Any = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCamelCase ) )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Dict:
A_ : Union[str, Any] = """lower newer"""
A_ : Any = """lower newer"""
return input_text, output_text
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Optional[Any] = BioGptTokenizer(self.vocab_file , self.merges_file )
A_ : Union[str, Any] = """lower"""
A_ : List[Any] = ["""low""", """er</w>"""]
A_ : Dict = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = tokens + ["""<unk>"""]
A_ : int = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A_ : Any = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase )
A_ : str = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase )
A_ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
A_ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 344 |
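The tokenizer fixture encodes the standard BPE setup: a vocab plus ranked merge rules, with `</w>` marking word ends. A toy greedy-merge loop showing why "lower" comes out as `["low", "er</w>"]` under those merges (an illustrative re-implementation, not the tokenizer's actual code):

```python
# Merge rules mapped to their rank; lower rank merges first.
merges = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}

def bpe(word: str) -> list[str]:
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        # Rank every adjacent pair; unknown pairs get infinite rank.
        pairs = [(merges.get(p, float("inf")), i)
                 for i, p in enumerate(zip(symbols, symbols[1:]))]
        rank, i = min(pairs)
        if rank == float("inf"):
            break  # no applicable merge left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

print(bpe("lower"))  # ['low', 'er</w>']
```

`("l","o")` fires first, then `("lo","w")`, then `("e","r</w>")`, and `("low","er</w>")` is not a known merge, so the loop stops with two tokens.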
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=False , ) -> Optional[int]:
A_ : Union[str, Any] = size if size is not None else {"""height""": 20, """width""": 20}
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ : Optional[Any] = parent
A_ : Optional[int] = batch_size
A_ : Union[str, Any] = num_channels
A_ : str = image_size
A_ : Tuple = min_resolution
A_ : Dict = max_resolution
A_ : str = do_resize
A_ : Tuple = size
A_ : int = do_center_crop
A_ : Dict = crop_size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : Optional[Any] = image_std
A_ : Any = do_reduce_labels
def UpperCAmelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
A_ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(dataset[0]["""file"""] )
A_ : Dict = Image.open(dataset[1]["""file"""] )
return image, map
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
A_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(ds[0]["""file"""] )
A_ : List[Any] = Image.open(ds[1]["""file"""] )
A_ : Any = Image.open(ds[2]["""file"""] )
A_ : str = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BeitImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : List[Any] = BeitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 344 | 1 |
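The final test exercises `do_reduce_labels`, which remaps ADE20k annotations so class 0 (background) stops occupying a label slot. A sketch of the usual convention as I understand BEiT's implementation: 0 becomes the ignore index 255 and all other classes shift down by one.

```python
import numpy as np

def reduce_labels(label: np.ndarray) -> np.ndarray:
    label = label.copy()
    label[label == 0] = 255      # background -> ignore index
    label = label - 1            # shift the remaining classes down
    label[label == 254] = 255    # keep the ignore index stable
    return label

seg = np.array([[0, 1, 150], [3, 0, 255]], dtype=np.uint8)
print(reduce_labels(seg))
# [[255   0 149]
#  [  2 255 255]]
```

That is exactly why the test expects labels in [0, 150] before reduction and a maximum of 255 afterwards.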
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
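The module above uses transformers' `_LazyModule` to defer heavy imports until an attribute is actually touched. A minimal standalone version of the same idea via PEP 562's module-level `__getattr__` (the general mechanism, not transformers' exact implementation):

```python
# mypkg/__init__.py
import importlib

_import_structure = {
    "configuration_groupvit": ["GroupViTConfig"],
    "modeling_groupvit": ["GroupViTModel"],
}
# Invert the structure: attribute name -> submodule that defines it.
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

With this in place, `import mypkg` is cheap, and `mypkg.GroupViTModel` triggers the (torch-dependent) submodule import only on first access, which is why the optional-dependency guards above can fail softly.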
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> str:
super().__init__()
A_ : Optional[Any] = pad_token_id
A_ : List[Any] = max_length
A_ : str = vocab
A_ : Union[str, Any] = merges
A_ : List[Any] = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> int:
A_ : Tuple = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
A_ : Dict = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> str:
A_ : Tuple = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> List[Any]:
return cls(**_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Any:
A_ : List[Any] = self.tf_tokenizer(_lowerCamelCase )
A_ : Any = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
A_ : List[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
A_ , A_ : Tuple = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 1 |
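The keras layer above relies on `pad_model_inputs` to truncate and pad ragged token ids and derive the attention mask. The same effect with plain TensorFlow ops, assuming `pad_token_id = 0` (note the caveat in the comments):

```python
import tensorflow as tf

token_ids = tf.ragged.constant([[5, 9, 2], [7]])
max_length, pad_token_id = 4, 0

dense = token_ids.to_tensor(default_value=pad_token_id)  # pad to the longest row
dense = dense[:, :max_length]                            # truncate
pad = max_length - tf.shape(dense)[1]
input_ids = tf.pad(dense, [[0, 0], [0, pad]], constant_values=pad_token_id)
# Caveat: deriving the mask by comparing against pad_token_id assumes no real
# token shares that id; pad_model_inputs works from sequence lengths instead.
attention_mask = tf.cast(input_ids != pad_token_id, tf.int32)

print(input_ids.numpy())       # [[5 9 2 0] [7 0 0 0]]
print(attention_mask.numpy())  # [[1 1 1 0] [1 0 0 0]]
```

Keeping the whole pipeline in TF ops is the point of this layer: tokenization, padding, and masking can then be saved inside a SavedModel graph.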
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Dict:
A_ : Any = 0
A_ : List[str] = 0
A_ : List[str] = {}
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Tuple:
if vertex not in self.adjacency:
A_ : List[Any] = {}
self.num_vertices += 1
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
self.add_vertex(_lowerCamelCase )
self.add_vertex(_lowerCamelCase )
if head == tail:
return
A_ : Optional[Any] = weight
A_ : Dict = weight
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = self.get_edges()
for edge in edges:
A_ , A_ , A_ : Optional[int] = edge
edges.remove((tail, head, weight) )
for i in range(len(_lowerCamelCase ) ):
A_ : str = list(edges[i] )
edges.sort(key=lambda _lowerCamelCase : e[2] )
for i in range(len(_lowerCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
A_ : List[Any] = edges[i][2] + 1
for edge in edges:
A_ , A_ , A_ : Optional[int] = edge
A_ : Union[str, Any] = weight
A_ : List[Any] = weight
def __str__( self ) -> List[Any]:
A_ : Dict = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
A_ : List[Any] = self.adjacency[head][tail]
string += F"{head} -> {tail} == {weight}\n"
return string.rstrip("""\n""" )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : List[Any] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def UpperCAmelCase_ ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def UpperCAmelCase_ ( _lowerCamelCase=None , _lowerCamelCase=None ) -> Dict:
A_ : Tuple = Graph()
if vertices is None:
A_ : List[str] = []
if edges is None:
A_ : List[Any] = []
for vertex in vertices:
g.add_vertex(_lowerCamelCase )
for edge in edges:
g.add_edge(*_lowerCamelCase )
return g
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
A_ : Union[str, Any] = {}
A_ : Any = {}
def __len__( self ) -> List[str]:
return len(self.parent )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Any:
if item in self.parent:
return self.find(_lowerCamelCase )
A_ : Optional[Any] = item
A_ : List[str] = 0
return item
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Any:
if item not in self.parent:
return self.make_set(_lowerCamelCase )
if item != self.parent[item]:
A_ : Optional[int] = self.find(self.parent[item] )
return self.parent[item]
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Union[str, Any] = self.find(_lowerCamelCase )
A_ : List[Any] = self.find(_lowerCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
A_ : List[str] = roota
return roota
if self.rank[roota] < self.rank[roota]:
A_ : List[str] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
A_ : List[str] = roota
return roota
return None
@staticmethod
def UpperCAmelCase_ ( _lowerCamelCase ) -> Any:
A_ : int = graph.num_vertices
A_ : Optional[int] = Graph.UnionFind()
A_ : Any = []
while num_components > 1:
A_ : int = {}
for vertex in graph.get_vertices():
A_ : str = -1
A_ : List[Any] = graph.get_edges()
for edge in edges:
A_ , A_ , A_ : Optional[int] = edge
edges.remove((tail, head, weight) )
for edge in edges:
A_ , A_ , A_ : str = edge
A_ : Tuple = union_find.find(_lowerCamelCase )
A_ : str = union_find.find(_lowerCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A_ : List[str] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
A_ : int = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
A_ , A_ , A_ : List[str] = cheap_edge[vertex]
if union_find.find(_lowerCamelCase ) != union_find.find(_lowerCamelCase ):
union_find.union(_lowerCamelCase , _lowerCamelCase )
mst_edges.append(cheap_edge[vertex] )
A_ : Tuple = num_components - 1
A_ : Optional[Any] = Graph.build(edges=_lowerCamelCase )
return mst
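# Readable, self-contained Borůvka sketch equivalent to the method above, using an
# edge list plus union-find with path halving. It assumes a connected graph with
# distinct weights (the class above perturbs tied weights for the same reason).
# The 4-vertex graph below is illustrative; its MST weight is 1 + 2 + 3 = 6.
def boruvka_mst(num_vertices, edges):
    parent = list(range(num_vertices))

    def find(v):
        while parent[v] != v:
            parent[v] = parent[parent[v]]  # path halving
            v = parent[v]
        return v

    mst, components = [], num_vertices
    while components > 1:
        # Cheapest outgoing edge per component in this pass.
        cheapest = {find(v): None for v in range(num_vertices)}
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for root in (ru, rv):
                    if cheapest[root] is None or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest.values():
            if edge is not None:
                u, v, w = edge
                ru, rv = find(u), find(v)
                if ru != rv:
                    parent[ru] = rv  # merge the two components
                    mst.append(edge)
                    components -= 1
    return mst

print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 10)]))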
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
UpperCamelCase__ : dict[tuple[int, int, int], int] = {}
def UpperCAmelCase ( a_ , a_ , a_ ) -> int:
"""simple docstring"""
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
A_ : Optional[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
A_ : Optional[Any] = _calculate(days - 1 , a_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
A_ : Tuple = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
A_ : str = _calculate(days - 1 , a_ , 0 )
A_ : Union[str, Any] = state_late + state_absent + state_ontime
A_ : List[str] = prizestrings
return prizestrings
def UpperCAmelCase ( a_ = 3_0 ) -> int:
"""simple docstring"""
return _calculate(a_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
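# Self-contained cross-check of the memoised recursion above for small day counts:
# enumerate every string over {O: on time, A: absent, L: late} and keep those with
# at most one absence and no three consecutive late days, matching the cut-offs
# `absent == 2` and `late == 3`. Helper names are illustrative.
from functools import lru_cache
from itertools import product

@lru_cache(maxsize=None)
def prize_strings(days, absent=0, late=0):
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)   # absent today
        + prize_strings(days - 1, absent, 0)       # on time today
    )

def brute_force(days):
    return sum(
        s.count("A") <= 1 and "LLL" not in s
        for s in ("".join(p) for p in product("OAL", repeat=days))
    )

assert all(prize_strings(n) == brute_force(n) for n in range(1, 9))
print(prize_strings(30))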
| 344 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Optional[Any] = name
A_ : Dict = value
A_ : Union[str, Any] = weight
def __repr__( self ) -> List[str]:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.value
def UpperCAmelCase_ ( self ) -> List[str]:
return self.name
def UpperCAmelCase_ ( self ) -> Tuple:
return self.weight
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.value / self.weight
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Optional[int] = []
for i in range(len(a_ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]:
"""simple docstring"""
A_ : Optional[Any] = sorted(a_ , key=a_ , reverse=a_ )
A_ : str = []
A_ , A_ : Dict = 0.0, 0.0
for i in range(len(a_ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
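# Hedged usage sketch of the greedy selection above, rewritten with readable names:
# sort by a key (here value density, mirroring get_value/get_weight) and take items
# while they fit the budget. The menu values below are illustrative only.
def greedy_sketch(items, max_cost, key):
    chosen, total_value, total_cost = [], 0.0, 0.0
    for name, value, weight in sorted(items, key=key, reverse=True):
        if total_cost + weight <= max_cost:
            chosen.append(name)
            total_cost += weight
            total_value += value
    return chosen, total_value

menu = [("burger", 80.0, 40.0), ("pizza", 100.0, 60.0), ("salad", 70.0, 10.0)]
# Sort by value density (value / weight); prints (['salad', 'burger'], 150.0).
print(greedy_sketch(menu, max_cost=60.0, key=lambda it: it[1] / it[2]))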
| 344 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0.0 , _lowerCamelCase = None , _lowerCamelCase = "geglu" , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = "layer_norm" , _lowerCamelCase = False , ) -> str:
super().__init__()
A_ : Tuple = only_cross_attention
A_ : Dict = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
A_ : Tuple = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A_ : Union[str, Any] = AdaLayerNorm(_lowerCamelCase , _lowerCamelCase )
elif self.use_ada_layer_norm_zero:
A_ : Optional[Any] = AdaLayerNormZero(_lowerCamelCase , _lowerCamelCase )
else:
A_ : Dict = nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase )
A_ : List[str] = Attention(
query_dim=_lowerCamelCase , heads=_lowerCamelCase , dim_head=_lowerCamelCase , dropout=_lowerCamelCase , bias=_lowerCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_lowerCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
A_ : List[Any] = (
AdaLayerNorm(_lowerCamelCase , _lowerCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase )
)
A_ : Optional[int] = Attention(
query_dim=_lowerCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_lowerCamelCase , dim_head=_lowerCamelCase , dropout=_lowerCamelCase , bias=_lowerCamelCase , upcast_attention=_lowerCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
A_ : Tuple = None
A_ : Any = None
# 3. Feed-forward
A_ : str = nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase )
A_ : Any = FeedForward(_lowerCamelCase , dropout=_lowerCamelCase , activation_fn=_lowerCamelCase , final_dropout=_lowerCamelCase )
# let chunk size default to None
A_ : Optional[Any] = None
A_ : int = 0
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> Dict:
# Sets chunk feed-forward
A_ : Union[str, Any] = chunk_size
A_ : List[str] = dim
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ) -> Tuple:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A_ : List[Any] = self.norma(_lowerCamelCase , _lowerCamelCase )
elif self.use_ada_layer_norm_zero:
A_ , A_ , A_ , A_ , A_ : Dict = self.norma(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hidden_dtype=hidden_states.dtype )
else:
A_ : List[Any] = self.norma(_lowerCamelCase )
A_ : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A_ : int = self.attna(
_lowerCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_lowerCamelCase , **_lowerCamelCase , )
if self.use_ada_layer_norm_zero:
A_ : Optional[Any] = gate_msa.unsqueeze(1 ) * attn_output
A_ : int = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A_ : Optional[Any] = (
self.norma(_lowerCamelCase , _lowerCamelCase ) if self.use_ada_layer_norm else self.norma(_lowerCamelCase )
)
A_ : Optional[int] = self.attna(
_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , attention_mask=_lowerCamelCase , **_lowerCamelCase , )
A_ : Tuple = attn_output + hidden_states
# 3. Feed-forward
A_ : List[str] = self.norma(_lowerCamelCase )
if self.use_ada_layer_norm_zero:
A_ : str = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
A_ : List[Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A_ : List[str] = torch.cat(
[self.ff(_lowerCamelCase ) for hid_slice in norm_hidden_states.chunk(_lowerCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A_ : Optional[int] = self.ff(_lowerCamelCase )
if self.use_ada_layer_norm_zero:
A_ : List[Any] = gate_mlp.unsqueeze(1 ) * ff_output
A_ : Tuple = ff_output + hidden_states
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = 4 , _lowerCamelCase = 0.0 , _lowerCamelCase = "geglu" , _lowerCamelCase = False , ) -> int:
super().__init__()
A_ : List[str] = int(dim * mult )
A_ : List[Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A_ : Any = GELU(_lowerCamelCase , _lowerCamelCase )
if activation_fn == "gelu-approximate":
A_ : Dict = GELU(_lowerCamelCase , _lowerCamelCase , approximate="""tanh""" )
elif activation_fn == "geglu":
A_ : int = GEGLU(_lowerCamelCase , _lowerCamelCase )
elif activation_fn == "geglu-approximate":
A_ : Tuple = ApproximateGELU(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = nn.ModuleList([] )
# project in
self.net.append(_lowerCamelCase )
# project dropout
self.net.append(nn.Dropout(_lowerCamelCase ) )
# project out
self.net.append(nn.Linear(_lowerCamelCase , _lowerCamelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_lowerCamelCase ) )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Any:
for module in self.net:
A_ : Dict = module(_lowerCamelCase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "none" ) -> Optional[Any]:
super().__init__()
A_ : int = nn.Linear(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = approximate
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
if gate.device.type != "mps":
return F.gelu(_lowerCamelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.proj(_lowerCamelCase )
A_ : Any = self.gelu(_lowerCamelCase )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> str:
super().__init__()
A_ : Any = nn.Linear(_lowerCamelCase , dim_out * 2 )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> int:
if gate.device.type != "mps":
return F.gelu(_lowerCamelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
A_ , A_ : List[str] = self.proj(_lowerCamelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_lowerCamelCase )
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> int:
super().__init__()
A_ : List[Any] = nn.Linear(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> int:
A_ : str = self.proj(_lowerCamelCase )
return x * torch.sigmoid(1.702 * x )
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> int:
super().__init__()
A_ : Dict = nn.Embedding(_lowerCamelCase , _lowerCamelCase )
A_ : int = nn.SiLU()
A_ : int = nn.Linear(_lowerCamelCase , embedding_dim * 2 )
A_ : str = nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> Any:
A_ : Dict = self.linear(self.silu(self.emb(_lowerCamelCase ) ) )
A_ , A_ : Any = torch.chunk(_lowerCamelCase , 2 )
A_ : Union[str, Any] = self.norm(_lowerCamelCase ) * (1 + scale) + shift
return x
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
super().__init__()
A_ : Tuple = CombinedTimestepLabelEmbeddings(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = nn.SiLU()
A_ : Union[str, Any] = nn.Linear(_lowerCamelCase , 6 * embedding_dim , bias=_lowerCamelCase )
A_ : Any = nn.LayerNorm(_lowerCamelCase , elementwise_affine=_lowerCamelCase , eps=1e-6 )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) -> List[str]:
A_ : Optional[Any] = self.linear(self.silu(self.emb(_lowerCamelCase , _lowerCamelCase , hidden_dtype=_lowerCamelCase ) ) )
A_ , A_ , A_ , A_ , A_ , A_ : int = emb.chunk(6 , dim=1 )
A_ : List[Any] = self.norm(_lowerCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = 1e-5 ) -> int:
super().__init__()
A_ : Tuple = num_groups
A_ : Union[str, Any] = eps
if act_fn is None:
A_ : List[str] = None
else:
A_ : Dict = get_activation(_lowerCamelCase )
A_ : Optional[Any] = nn.Linear(_lowerCamelCase , out_dim * 2 )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> Dict:
if self.act:
A_ : Dict = self.act(_lowerCamelCase )
A_ : Union[str, Any] = self.linear(_lowerCamelCase )
A_ : Tuple = emb[:, :, None, None]
A_ , A_ : int = emb.chunk(2 , dim=1 )
A_ : Union[str, Any] = F.group_norm(_lowerCamelCase , self.num_groups , eps=self.eps )
A_ : Any = x * (1 + scale) + shift
return x
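# Minimal standalone check of the GEGLU gating pattern defined above: project to
# twice the output width, split, and gate one half with GELU of the other. The
# tensor shapes below are illustrative.
import torch
import torch.nn.functional as F
from torch import nn

class GEGLUSketch(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        hidden, gate = self.proj(x).chunk(2, dim=-1)
        return hidden * F.gelu(gate)

x = torch.randn(2, 4, 8)            # (batch, seq, dim)
print(GEGLUSketch(8, 16)(x).shape)  # torch.Size([2, 4, 16])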
| 344 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase ( a_ , a_ ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
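# Worked example of the formula above, f = 1 / (2*pi*sqrt(L*C)): with L = 10 mH and
# C = 100 nF the resonant frequency comes out near 5.03 kHz. Values are illustrative.
from math import pi, sqrt

inductance, capacitance = 10e-3, 100e-9
print(1 / (2 * pi * sqrt(inductance * capacitance)))  # ~5032.9 Hz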
| 344 | 1 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ : Optional[Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCamelCase__ : Tuple = {
'allenai/led-base-16384': 16_384,
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = LEDTokenizer
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) -> Dict:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
A_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCamelCase ) != add_prefix_space:
A_ : Dict = getattr(_lowerCamelCase , pre_tok_state.pop("""type""" ) )
A_ : Tuple = add_prefix_space
A_ : Optional[Any] = pre_tok_class(**_lowerCamelCase )
A_ : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : Any = """post_processor"""
A_ : Optional[Any] = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
A_ : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Union[str, Any] = tuple(state["""sep"""] )
if "cls" in state:
A_ : List[str] = tuple(state["""cls"""] )
A_ : str = False
if state.get("""add_prefix_space""" , _lowerCamelCase ) != add_prefix_space:
A_ : Tuple = add_prefix_space
A_ : Dict = True
if state.get("""trim_offsets""" , _lowerCamelCase ) != trim_offsets:
A_ : List[str] = trim_offsets
A_ : Any = True
if changes_to_apply:
A_ : Optional[int] = getattr(_lowerCamelCase , state.pop("""type""" ) )
A_ : str = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCAmelCase_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Dict:
A_ : Union[str, Any] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
A_ : Optional[Any] = value
def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> BatchEncoding:
A_ : List[Any] = kwargs.get("""is_split_into_words""" , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ) -> BatchEncoding:
A_ : List[str] = kwargs.get("""is_split_into_words""" , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
A_ : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=None ) -> str:
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]:
A_ : Optional[int] = [self.sep_token_id]
A_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) -> dict:
A_ : List[Any] = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
A_ : Tuple = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A_ : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
A_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(_lowerCamelCase )
if needs_to_be_padded:
A_ : str = len(_lowerCamelCase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A_ : Dict = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
A_ : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
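# Standalone sketch of the global_attention_mask padding rule above: pad with -1
# (meaning "local attention", not "do not attend") on the configured side. The
# helper name and example values are illustrative.
def pad_global_attention_mask(mask, target_len, padding_side="right"):
    diff = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * diff
    if padding_side == "left":
        return [-1] * diff + mask
    raise ValueError(f"Invalid padding strategy: {padding_side}")

print(pad_global_attention_mask([1, 0, 0], target_len=6))  # [1, 0, 0, -1, -1, -1]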
| 344 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any:
A_ : List[Any] = parent
A_ : int = config_class
A_ : int = has_text_modality
A_ : str = kwargs
A_ : int = common_properties
def UpperCAmelCase_ ( self ) -> str:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : Optional[int] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCamelCase ):
try:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCamelCase ):
try:
A_ : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = self.config_class(**self.inputs_dict )
A_ : Optional[int] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" )
config_first.to_json_file(_lowerCamelCase )
A_ : Dict = self.config_class.from_json_file(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : List[Any] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
config_first.save_pretrained(_lowerCamelCase )
A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
A_ : str = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_class.is_composition:
return
A_ : Dict = self.config_class()
self.parent.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = copy.deepcopy(_lowerCamelCase )
A_ : Tuple = self.config_class(**_lowerCamelCase )
A_ : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCamelCase , _lowerCamelCase ) != value:
wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) )
if len(_lowerCamelCase ) > 0:
A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
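# Hedged usage sketch of the mixin above (exposed upstream as `ConfigTester`, whose
# final method above is upstream's `run_common_tests`). The BertConfig choice and
# hidden_size value are illustrative assumptions.
import unittest
from transformers import BertConfig

class BertConfigTest(unittest.TestCase):
    def test_config(self):
        # `ConfigTester` stands in for the class defined above.
        tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
        tester.run_common_tests()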
| 344 | 1 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : int = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A_ : Union[str, Any] = """https://pypi.org/pypi/diffusers/json"""
A_ : List[str] = json.loads(request.urlopen(a_ ).read() )["""releases"""].keys()
return sorted(a_ , key=lambda a_ : version.Version(a_ ) )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(a_ )
os.makedirs(a_ , exist_ok=a_ )
A_ : Any = Path(a_ ) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
init_hf_modules()
A_ : Optional[Any] = Path(a_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(a_ , exist_ok=a_ )
A_ : Optional[int] = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
with open(a_ , """r""" , encoding="""utf-8""" ) as f:
A_ : Union[str, Any] = f.read()
# Imports of the form `import .xxx`
A_ : Optional[Any] = re.findall(r"""^\s*import\s+\.(\S+)\s*$""" , a_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""" , a_ , flags=re.MULTILINE )
# Unique-ify
return list(set(a_ ) )
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : str = False
A_ : List[str] = [module_file]
A_ : int = []
# Let's recurse through all relative imports
while not no_change:
A_ : Optional[int] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(a_ ) )
A_ : Tuple = Path(a_ ).parent
A_ : Any = [str(module_path / m ) for m in new_imports]
A_ : str = [f for f in new_import_files if f not in all_relative_imports]
A_ : int = [F"{f}.py" for f in new_import_files]
A_ : Dict = len(a_ ) == 0
all_relative_imports.extend(a_ )
return all_relative_imports
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
with open(a_ , """r""" , encoding="""utf-8""" ) as f:
A_ : Optional[int] = f.read()
# Imports of the form `import xxx`
A_ : Dict = re.findall(r"""^\s*import\s+(\S+)\s*$""" , a_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r"""^\s*from\s+(\S+)\s+import""" , a_ , flags=re.MULTILINE )
# Only keep the top-level module
A_ : int = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
A_ : List[str] = list(set(a_ ) )
A_ : List[Any] = []
for imp in imports:
try:
importlib.import_module(a_ )
except ImportError:
missing_packages.append(a_ )
if len(a_ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
F"{', '.join(a_ )}. Run `pip install {' '.join(a_ )}`" )
return get_relative_imports(a_ )
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
A_ : Dict = module_path.replace(os.path.sep , """.""" )
A_ : int = importlib.import_module(a_ )
if class_name is None:
return find_pipeline_class(a_ )
return getattr(a_ , a_ )
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
A_ : Optional[Any] = dict(inspect.getmembers(a_ , inspect.isclass ) )
A_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , a_ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
F" {loaded_module}." )
A_ : Dict = cls
return pipeline_class
def UpperCAmelCase ( a_ , a_ , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , a_ = None , a_ = False , ) -> Union[str, Any]:
"""simple docstring"""
A_ : Tuple = str(a_ )
A_ : str = os.path.join(a_ , a_ )
if os.path.isfile(a_ ):
A_ : Union[str, Any] = module_file_or_url
A_ : Tuple = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
A_ : Optional[Any] = get_diffusers_versions()
# cut ".dev0"
A_ : Dict = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
A_ : Dict = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
A_ : List[Any] = F"v{revision}"
elif revision == "main":
A_ : int = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
A_ : Optional[int] = COMMUNITY_PIPELINES_URL.format(revision=a_ , pipeline=a_ )
try:
A_ : Optional[Any] = cached_download(
a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , )
A_ : List[Any] = """git"""
A_ : List[str] = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
A_ : List[str] = hf_hub_download(
a_ , a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , )
A_ : Union[str, Any] = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
A_ : Optional[int] = check_imports(a_ )
# Now we move the module inside our cached dynamic modules.
A_ : List[str] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(a_ )
A_ : int = Path(a_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(a_ , submodule_path / module_file )
for module_needed in modules_needed:
A_ : Any = F"{module_needed}.py"
shutil.copy(os.path.join(a_ , a_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(a_ , a_ ):
A_ : List[str] = use_auth_token
elif use_auth_token is True:
A_ : str = HfFolder.get_token()
else:
A_ : Optional[Any] = None
A_ : Optional[Any] = model_info(a_ , revision=a_ , token=a_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A_ : Union[str, Any] = submodule_path / commit_hash
A_ : List[str] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(a_ )
if not (submodule_path / module_file).exists():
shutil.copy(a_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
a_ , F"{module_needed}.py" , cache_dir=a_ , force_download=a_ , resume_download=a_ , proxies=a_ , use_auth_token=a_ , revision=a_ , local_files_only=a_ , )
return os.path.join(a_ , a_ )
def UpperCAmelCase ( a_ , a_ , a_ = None , a_ = None , a_ = False , a_ = False , a_ = None , a_ = None , a_ = None , a_ = False , **a_ , ) -> List[str]:
"""simple docstring"""
A_ : Tuple = get_cached_module_file(
a_ , a_ , cache_dir=a_ , force_download=a_ , resume_download=a_ , proxies=a_ , use_auth_token=a_ , revision=a_ , local_files_only=a_ , )
return get_class_in_module(a_ , final_module.replace(""".py""" , """""" ) )
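# Quick standalone check of the relative-import patterns used above (now written as
# raw strings): they should pick up `import .x` and `from .y import z` lines but
# ignore absolute imports. The sample source text is illustrative.
import re

source = "import os\nimport .helpers\nfrom .blocks import UNet\n"
rel = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
rel += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
print(sorted(set(rel)))  # ['blocks', 'helpers']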
| 344 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
try:
with open(a_ , """rb""" ) as flax_state_f:
A_ : Tuple = from_bytes(a_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a_ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(a_ , a_ )
def UpperCAmelCase ( a_ , a_ ) -> Any:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
A_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda a_ : x.dtype == jnp.bfloataa , a_ ) ).values()
if any(a_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
A_ : str = jax.tree_util.tree_map(
lambda a_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a_ )
A_ : Any = """"""
A_ : Optional[int] = flatten_dict(a_ , sep=""".""" )
A_ : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
A_ : Union[str, Any] = []
A_ : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A_ : List[Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[Any] = jnp.transpose(a_ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
A_ : int = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(a_ ):
A_ : Tuple = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
A_ : Dict = """.""".join(a_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
A_ : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ , np.ndarray ) else flax_tensor
A_ : Tuple = torch.from_numpy(a_ )
# remove from missing keys
missing_keys.remove(a_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(a_ )
pt_model.load_state_dict(a_ )
# re-transform missing_keys to list
A_ : Dict = list(a_ )
if len(a_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(a_ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
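# Standalone illustration of the conv-kernel layout change applied above: Flax stores
# conv kernels as (H, W, in, out) while PyTorch expects (out, in, H, W), hence the
# transpose with axes (3, 2, 0, 1); dense kernels only need a plain transpose. The
# kernel shape below is illustrative.
import numpy as np

flax_conv = np.zeros((3, 3, 16, 32))             # (H, W, in_ch, out_ch)
pt_conv = np.transpose(flax_conv, (3, 2, 0, 1))  # (out_ch, in_ch, H, W)
print(pt_conv.shape)                             # (32, 16, 3, 3)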
| 344 | 1 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Tuple = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=a_ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=a_ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=a_ )
return parser.parse_args()
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
A_ : List[Any] = parse_args()
# Import training_script as a module.
A_ : Tuple = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
A_ : Optional[Any] = script_fpath.stem
A_ : int = importlib.import_module(a_ )
# Patch sys.argv
A_ : Any = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
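# Illustrative invocation of the launcher above (paths and flag values are
# hypothetical):
#   python xla_spawn.py --num_cores 8 train.py --lr 3e-4
# After parsing, the spawned processes effectively see
#   sys.argv == ["train.py", "--lr", "3e-4", "--tpu_num_cores", "8"]
# and xmp.spawn calls the script's _mp_fn once per TPU core.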
| 344 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Dict = TFRegNetModel(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Optional[Any] = self.num_labels
A_ : int = TFRegNetForImageClassification(_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Any = self.prepare_config_and_inputs()
A_ , A_ , A_ : str = config_and_inputs
A_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = TFRegNetModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Dict = layer_type
A_ : List[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TFRegNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" )
# forward pass
A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase )
# verify the logits
A_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
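# Minimal standalone demo of the tolerance check used at the end of the test above;
# the expected slice is copied from it, and the small perturbation is illustrative.
import tensorflow as tf

expected = tf.constant([-0.4180, -1.5051, -3.4836])
observed = expected + 5e-5  # well inside the atol=1e-4 used above
tf.debugging.assert_near(observed, expected, atol=1e-4)  # raises only on mismatch
print("logit slice matches within atol=1e-4")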
| 344 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase__ : int = 16
UpperCamelCase__ : str = 32
def UpperCAmelCase ( a_ , a_ = 1_6 , a_ = "bert-base-cased" ) -> Optional[int]:
"""simple docstring"""
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(a_ )
A_ : int = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(a_ ):
# max_length=None => use the model max length (it's actually the default)
A_ : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=a_ , max_length=a_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A_ : str = datasets.map(
a_ , batched=a_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=a_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(a_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a_ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(a_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
A_ : Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
A_ : Union[str, Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
return train_dataloader, eval_dataloader
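# get_dataloaders above pads each batch only to its longest sequence ("longest"),
# falling back to static "max_length" padding on TPU to avoid recompilation.
# A dependency-light sketch of per-batch padding with a custom collate_fn:
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

_demo_samples = [torch.arange(n) for n in (3, 5, 2)]  # variable-length "token ids"

def _demo_collate(batch):
    return pad_sequence(batch, batch_first=True, padding_value=0)

_demo_loader = DataLoader(_demo_samples, batch_size=3, collate_fn=_demo_collate)
print(next(iter(_demo_loader)).shape)  # torch.Size([3, 5]) -> padded to the longest sample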
def UpperCAmelCase ( a_ , a_ ) -> List[str]:
"""simple docstring"""
A_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : Any = config["""lr"""]
A_ : Tuple = int(config["""num_epochs"""] )
A_ : int = int(config["""seed"""] )
A_ : Optional[Any] = int(config["""batch_size"""] )
A_ : int = args.model_name_or_path
set_seed(a_ )
A_ , A_ : str = get_dataloaders(a_ , a_ , a_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Dict = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_ )
# Instantiate optimizer
A_ : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A_ : Optional[int] = optimizer_cls(params=model.parameters() , lr=a_ )
if accelerator.state.deepspeed_plugin is not None:
A_ : Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
A_ : str = 1
A_ : str = (len(a_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A_ : str = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , )
else:
A_ : Any = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Any = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# We need to keep track of how many total steps we have iterated over
A_ : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
A_ : List[str] = 0
# Now we train the model
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
A_ : Union[str, Any] = 0
A_ : int = {}
for epoch in range(a_ , a_ ):
model.train()
for step, batch in enumerate(a_ ):
A_ : Any = model(**a_ )
A_ : Union[str, Any] = outputs.loss
A_ : str = loss / gradient_accumulation_steps
accelerator.backward(a_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
A_ : str = 0
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A_ : List[str] = model(**a_ )
A_ : Dict = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A_ , A_ : str = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(a_ ) - 1:
A_ : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A_ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=a_ , references=a_ , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , a_ )
A_ : Union[str, Any] = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
A_ : Tuple = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(a_ , a_ )
def UpperCAmelCase ( ) -> str:
"""simple docstring"""
A_ : Dict = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=a_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=a_ , )
parser.add_argument(
"""--output_dir""" , type=a_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=a_ , default=a_ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=a_ , default=3 , help="""Number of train epochs.""" , )
A_ : Union[str, Any] = parser.parse_args()
A_ : Optional[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(a_ , a_ )
if __name__ == "__main__":
main()
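# The training loop above scales each micro-batch loss by
# gradient_accumulation_steps and only steps the optimizer every N batches.
# A toy, self-contained sketch of that schedule (model and data are made up):
import torch

_toy_model = torch.nn.Linear(4, 1)
_toy_opt = torch.optim.SGD(_toy_model.parameters(), lr=0.1)
_accum = 4
for _step in range(8):
    _loss = _toy_model(torch.randn(2, 4)).pow(2).mean() / _accum  # scale per micro-batch
    _loss.backward()                                              # gradients accumulate
    if (_step + 1) % _accum == 0:                                 # step once every _accum batches
        _toy_opt.step()
        _toy_opt.zero_grad()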
| 344 |
'''simple docstring'''
def UpperCAmelCase ( a_ = 1_0_0 ) -> int:
"""simple docstring"""
A_ : Dict = n * (n + 1) * (2 * n + 1) / 6
A_ : Optional[int] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'{solution() = }')
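# A brute-force cross-check of the closed forms used in the solution: the sum
# of squares is n(n+1)(2n+1)/6 and the square of the sum is (n(n+1)/2)**2; for
# n = 100 their difference is 25164150.
n = 100
assert sum(i * i for i in range(1, n + 1)) == n * (n + 1) * (2 * n + 1) // 6
assert sum(range(1, n + 1)) ** 2 == (n * (n + 1) // 2) ** 2
print(sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1)))  # 25164150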
| 344 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCAmelCase ( a_ , a_ ) -> str | Literal[False]:
"""simple docstring"""
A_ : Union[str, Any] = list(a_ )
A_ : int = list(a_ )
A_ : str = 0
for i in range(len(a_ ) ):
if lista[i] != lista[i]:
count += 1
A_ : Tuple = """_"""
if count > 1:
return False
else:
return "".join(a_ )
def UpperCAmelCase ( a_ ) -> list[str]:
"""simple docstring"""
A_ : List[Any] = []
while True:
A_ : List[Any] = ["""$"""] * len(a_ )
A_ : List[Any] = []
for i in range(len(a_ ) ):
for j in range(i + 1 , len(a_ ) ):
A_ : str = compare_string(binary[i] , binary[j] )
if k is False:
A_ : int = """*"""
A_ : Optional[int] = """*"""
temp.append("""X""" )
for i in range(len(a_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a_ ) == 0:
return pi
A_ : List[Any] = list(set(a_ ) )
def UpperCAmelCase ( a_ , a_ ) -> list[str]:
"""simple docstring"""
A_ : Union[str, Any] = []
for minterm in minterms:
A_ : str = """"""
for _ in range(a_ ):
A_ : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(a_ )
return temp
def UpperCAmelCase ( a_ , a_ , a_ ) -> bool:
"""simple docstring"""
A_ : Optional[int] = list(a_ )
A_ : List[Any] = list(a_ )
A_ : List[str] = 0
for i in range(len(a_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCAmelCase ( a_ , a_ ) -> list[str]:
"""simple docstring"""
A_ : Tuple = []
A_ : Any = [0] * len(a_ )
for i in range(len(chart[0] ) ):
A_ : List[str] = 0
A_ : str = -1
for j in range(len(a_ ) ):
if chart[j][i] == 1:
count += 1
A_ : List[str] = j
if count == 1:
A_ : Optional[Any] = 1
for i in range(len(a_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a_ ) ):
A_ : Any = 0
temp.append(prime_implicants[i] )
while True:
A_ : List[str] = 0
A_ : Dict = -1
A_ : List[str] = 0
for i in range(len(a_ ) ):
A_ : List[Any] = chart[i].count(1 )
if count_n > max_n:
A_ : Union[str, Any] = count_n
A_ : List[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a_ ) ):
A_ : Union[str, Any] = 0
def UpperCAmelCase ( a_ , a_ ) -> list[list[int]]:
"""simple docstring"""
A_ : int = [[0 for x in range(len(a_ ) )] for x in range(len(a_ ) )]
for i in range(len(a_ ) ):
A_ : List[str] = prime_implicants[i].count("""_""" )
for j in range(len(a_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , a_ ):
A_ : List[Any] = 1
return chart
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
A_ : Optional[int] = int(input("""Enter the no. of variables\n""" ) )
A_ : int = [
float(a_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
A_ : Any = decimal_to_binary(a_ , a_ )
A_ : int = check(a_ )
print("""Prime Implicants are:""" )
print(a_ )
A_ : Any = prime_implicant_chart(a_ , a_ )
A_ : Union[str, Any] = selection(a_ , a_ )
print("""Essential Prime Implicants are:""" )
print(a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
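# The core reduction rule above (compare_string) merges two implicants when
# they differ in exactly one bit, replacing that bit with '_'. A standalone
# illustration of the rule:
def merge_if_adjacent(a, b):
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    return None if len(diff) != 1 else a[: diff[0]] + "_" + a[diff[0] + 1 :]

print(merge_if_adjacent("0110", "0100"))  # '01_0' (bits differ only at index 2)
print(merge_if_adjacent("0110", "1001"))  # None  (more than one differing bit)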
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCamelCase__ : int = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
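# _LazyModule defers the heavy submodule imports until an attribute is first
# accessed. A minimal sketch of the same idea using a module-level __getattr__
# (PEP 562); the mapping below is illustrative, not the transformers internals:
import importlib

_lazy_map = {"sqrt": "math", "dumps": "json"}

def __getattr__(name):
    if name in _lazy_map:
        return getattr(importlib.import_module(_lazy_map[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")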
| 344 | 1 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
UpperCamelCase__ : str = '.'
if __name__ == "__main__":
UpperCamelCase__ : List[str] = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : List[Any] = []
with open(doctest_file_path) as fp:
for line in fp:
UpperCamelCase__ : Any = line.strip()
UpperCamelCase__ : Optional[int] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
UpperCamelCase__ : Any = '\n'.join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
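# The check above only reports ordering violations. A hedged sketch of a
# complementary auto-fix that rewrites the list in sorted order (writing back
# to the same file is an assumption, not part of the original utility):
def sort_doctest_list(path="utils/documentation_tests.txt"):
    with open(path) as fp:
        entries = [line.strip() for line in fp if line.strip()]
    with open(path, "w") as fp:
        fp.write("\n".join(sorted(entries)) + "\n")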
| 344 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def UpperCAmelCase ( a_ ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
A_ : List[str] = []
A_ : Dict = []
A_ : List[Any] = []
for rt in rc.restypes:
A_ : Tuple = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
A_ : Union[str, Any] = {name: i for i, name in enumerate(a_ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 1_4 )
restype_atomaa_to_atomaa_list.append([0] * 3_7 )
restype_atomaa_mask_list.append([0.0] * 1_4 )
A_ : Tuple = torch.tensor(
a_ , dtype=torch.intaa , device=protein["""aatype"""].device , )
A_ : Optional[int] = torch.tensor(
a_ , dtype=torch.intaa , device=protein["""aatype"""].device , )
A_ : List[Any] = torch.tensor(
a_ , dtype=torch.floataa , device=protein["""aatype"""].device , )
A_ : Optional[int] = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
A_ : Dict = restype_atomaa_to_atomaa[protein_aatype]
A_ : Optional[Any] = restype_atomaa_mask[protein_aatype]
A_ : Any = residx_atomaa_mask
A_ : List[str] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
A_ : Tuple = restype_atomaa_to_atomaa[protein_aatype]
A_ : Tuple = residx_atomaa_to_atomaa.long()
# create the corresponding mask
A_ : Optional[Any] = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
A_ : Optional[Any] = rc.restype_atoa[restype_letter]
A_ : Any = rc.residue_atoms[restype_name]
for atom_name in atom_names:
A_ : Any = rc.atom_order[atom_name]
A_ : Optional[int] = 1
A_ : Optional[int] = restype_atomaa_mask[protein_aatype]
A_ : Dict = residx_atomaa_mask
return protein
def UpperCAmelCase ( a_ ) -> Dict[str, np.ndarray]:
"""simple docstring"""
A_ : Union[str, Any] = tree_map(lambda a_ : torch.tensor(a_ , device=batch["""aatype"""].device ) , a_ , np.ndarray )
A_ : Optional[int] = tensor_tree_map(lambda a_ : np.array(a_ ) , make_atomaa_masks(a_ ) )
return out
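# make_atomaa_masks builds per-residue-type lookup tables and then indexes them
# with the aatype tensor so each residue picks up its own atom mapping. A toy
# version of that fancy-indexing pattern with made-up shapes:
import torch

_table = torch.tensor([[0, 1, 2], [3, 4, 5]])  # 2 "residue types" x 3 "atoms"
_aatype = torch.tensor([1, 0, 1])              # per-residue type ids
print(_table[_aatype])                         # tensor([[3, 4, 5], [0, 1, 2], [3, 4, 5]])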
| 344 | 1 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ) -> str:
A_ : Optional[int] = parent
A_ : Dict = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = patch_size
A_ : List[str] = num_channels
A_ : List[Any] = is_training
A_ : Union[str, Any] = use_labels
A_ : Union[str, Any] = hidden_size
A_ : str = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Any = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Dict = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : str = scope
A_ : Optional[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A_ : Tuple = (image_size // patch_size) ** 2
A_ : Union[str, Any] = num_patches + 2
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> int:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[str] = DeiTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : int = DeiTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Dict = 1
A_ : Optional[int] = DeiTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.type_sequence_label_size
A_ : Tuple = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Dict = 1
A_ : Any = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : List[Any] = self.prepare_config_and_inputs()
        A_ , A_ , A_ : Union[str, Any] = config_and_inputs
A_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = DeiTModelTester(self )
A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]:
A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : List[str] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : Any = False
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A_ : List[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Union[str, Any] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
A_ : Dict = problem_type["""title"""]
A_ : List[Any] = problem_type["""num_labels"""]
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
A_ : List[str] = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
A_ : Optional[int] = self.default_image_processor
A_ : str = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Any = model(**_lowerCamelCase )
# verify the logits
A_ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : List[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
A_ : Union[str, Any] = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : List[Any] = model(_lowerCamelCase )
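# The problem_type test above turns single labels into multi-label targets with
# unsqueeze(1).repeat(1, num_labels). A standalone look at that reshaping:
import torch

_labels = torch.tensor([0, 1])              # shape (batch,)
_multi = _labels.unsqueeze(1).repeat(1, 2)  # shape (batch, num_labels)
print(_multi.tolist())                      # [[0, 0], [1, 1]]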
| 344 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 32 , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowerCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowerCamelCase = True , _lowerCamelCase=7 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=3 , ) -> Union[str, Any]:
A_ : Optional[int] = parent
A_ : Union[str, Any] = do_resize
A_ : Optional[Any] = size if size is not None else {"""shortest_edge""": 288}
A_ : Tuple = size_divisor
A_ : List[Any] = do_rescale
A_ : Dict = rescale_factor
A_ : List[Any] = do_normalize
A_ : Dict = do_center_crop
A_ : Optional[Any] = image_mean
A_ : List[str] = image_std
A_ : str = do_pad
A_ : Any = batch_size
A_ : List[str] = num_channels
A_ : List[str] = min_resolution
A_ : Union[str, Any] = max_resolution
def UpperCAmelCase_ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
if not batched:
A_ : Union[str, Any] = self.size["""shortest_edge"""]
A_ : Dict = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
A_ , A_ : Optional[Any] = image.size
else:
A_ , A_ : int = image.shape[1], image.shape[2]
A_ : Optional[int] = size / min(_lowerCamelCase , _lowerCamelCase )
if h < w:
A_ , A_ : Optional[Any] = size, scale * w
else:
A_ , A_ : Dict = scale * h, size
A_ : Union[str, Any] = int((1333 / 800) * size )
if max(_lowerCamelCase , _lowerCamelCase ) > max_size:
A_ : str = max_size / max(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = newh * scale
A_ : Dict = neww * scale
A_ , A_ : str = int(newh + 0.5 ), int(neww + 0.5 )
A_ , A_ : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0]
A_ : Tuple = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : int = BridgeTowerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image processor
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image processor
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
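# get_expected_values mirrors the processor's shortest-edge resize followed by
# rounding down to a multiple of size_divisor. A standalone version of that
# arithmetic for a single image (defaults match the tester above):
def expected_size(h, w, shortest_edge=288, size_divisor=32):
    max_size = int(1333 / 800 * shortest_edge)
    scale = shortest_edge / min(h, w)
    newh, neww = (shortest_edge, scale * w) if h < w else (scale * h, shortest_edge)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_size(480, 640))  # (288, 384)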
| 344 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : List[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
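# _compute above re-nests the flat reference dicts into the raw SQuAD layout
# (dataset -> paragraphs -> qas -> answers) expected by the scoring script.
# A standalone look at that reshaping:
_refs = [{"id": "q1", "answers": {"text": ["1976"], "answer_start": [97]}}]
_dataset = [{"paragraphs": [{"qas": [
    {"id": r["id"], "answers": [{"text": t} for t in r["answers"]["text"]]}
    for r in _refs
]}]}]
print(_dataset[0]["paragraphs"][0]["qas"][0])  # {'id': 'q1', 'answers': [{'text': '1976'}]}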
| 344 |
'''simple docstring'''
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(a_ ):
for j in range(a_ ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) , end="""\t""" )
else:
print("""INF""" , end="""\t""" )
print()
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
A_ : List[str] = [[float("""inf""" ) for _ in range(a_ )] for _ in range(a_ )]
for i in range(a_ ):
for j in range(a_ ):
A_ : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(a_ ):
# looping through rows of graph array
for i in range(a_ ):
# looping through columns of graph array
for j in range(a_ ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
A_ : List[str] = dist[i][k] + dist[k][j]
_print_dist(a_ , a_ )
return dist, v
if __name__ == "__main__":
UpperCamelCase__ : Tuple = int(input('Enter number of vertices: '))
UpperCamelCase__ : int = int(input('Enter number of edges: '))
UpperCamelCase__ : Dict = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
UpperCamelCase__ : Union[str, Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
UpperCamelCase__ : Union[str, Any] = int(input('Enter source:'))
UpperCamelCase__ : int = int(input('Enter destination:'))
UpperCamelCase__ : Optional[Any] = float(input('Enter weight:'))
UpperCamelCase__ : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge, and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
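# The implementation above is interactive; here is a self-contained run on the
# 3-vertex example from the comments (edge 1 -> 2 with weight 2, edge 2 -> 1
# with weight 1), using the same triple loop:
_INF = float("inf")
_dist = [[0.0, _INF, _INF], [_INF, 0.0, 2.0], [_INF, 1.0, 0.0]]
for _k in range(3):
    for _i in range(3):
        for _j in range(3):
            _dist[_i][_j] = min(_dist[_i][_j], _dist[_i][_k] + _dist[_k][_j])
print(_dist)  # [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]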
| 344 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=16 , _lowerCamelCase=36 , _lowerCamelCase=6 , _lowerCamelCase=6 , _lowerCamelCase=6 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ) -> List[str]:
A_ : Dict = parent
A_ : Any = batch_size
A_ : Union[str, Any] = seq_length
A_ : Optional[int] = is_training
A_ : Any = use_input_mask
A_ : Dict = use_token_type_ids
A_ : Optional[Any] = use_labels
A_ : int = vocab_size
A_ : List[Any] = embedding_size
A_ : Tuple = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_hidden_groups
A_ : Union[str, Any] = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Tuple = max_position_embeddings
A_ : Dict = type_vocab_size
A_ : str = type_sequence_label_size
A_ : Dict = initializer_range
A_ : Union[str, Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : Dict = scope
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : List[Any] = None
if self.use_input_mask:
A_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Union[str, Any] = None
if self.use_token_type_ids:
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : List[str] = None
A_ : Tuple = None
A_ : Tuple = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
A_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ) -> Tuple:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : str = AlbertModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : int = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
A_ : str = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Union[str, Any] = AlbertForPreTraining(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , sentence_order_label=_lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
A_ : Dict = AlbertForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : Optional[Any] = AlbertForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Union[str, Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
A_ : Dict = self.num_labels
A_ : Tuple = AlbertForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
A_ : Dict = self.num_labels
A_ : List[Any] = AlbertForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
A_ : int = self.num_choices
A_ : List[Any] = AlbertForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : str = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ : Union[str, Any] = config_and_inputs
A_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = True
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> str:
A_ : Tuple = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class in get_values(_lowerCamelCase ):
A_ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCamelCase )
A_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
return inputs_dict
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : List[Any] = AlbertModelTester(self )
A_ : Any = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : int = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Any:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[Any] = AlbertModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : int = AlbertModel.from_pretrained("""albert-base-v2""" )
A_ : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A_ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A_ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
A_ : str = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : List[str] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
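# The multiple-choice test above tiles every input num_choices times with
# unsqueeze(1).expand(-1, num_choices, -1).contiguous(). A standalone look at
# that expansion:
import torch

_input_ids = torch.tensor([[5, 6, 7]])                           # (batch=1, seq=3)
_tiled = _input_ids.unsqueeze(1).expand(-1, 4, -1).contiguous()  # (1, choices=4, 3)
print(_tiled.shape)  # torch.Size([1, 4, 3])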
| 344 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def UpperCAmelCase_ ( self , predictions , references ) -> List[Any]:
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 344 | 1 |
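For reference, the metric above is driven exactly as its docstring shows; a hedged end-to-end sketch:

# Hedged usage sketch for the SQuAD metric above; requires the datasets package.
import datasets

squad_metric = datasets.load_metric("squad")
predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
results = squad_metric.compute(predictions=predictions, references=references)
print(results)  # {'exact_match': 100.0, 'f1': 100.0}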
'''simple docstring'''
from maths.prime_check import is_prime
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
    if not isinstance(a_ , int ):
        A_ : str = F"Input value of [number={a_}] must be an integer"
        raise TypeError(A_ )
    if is_prime(a_ ) and is_prime(a_ + 2 ):
        return a_ + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 |
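A readable restatement of the twin-prime helper above, with the same contract: it returns n + 2 when both n and n + 2 are prime, and -1 otherwise. The is_prime import is the same local helper the sample uses.

# Readable sketch of the twin-prime check above.
from maths.prime_check import is_prime  # same local helper as in the sample

def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1

# twin_prime(5) == 7; twin_prime(7) == -1 because 9 is not prime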
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
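The data2vec module above registers itself as a _LazyModule so that heavy submodules are imported only on first attribute access. A simplified stand-in for that mechanism, an assumption about how it works rather than the real transformers implementation, looks roughly like this:

# Simplified sketch of the lazy-import pattern; this is an assumption about
# the mechanism, not transformers' actual _LazyModule code.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)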
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : str = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
A_ : Any = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler("""sample_euler""" )
A_ : List[str] = """A painting of a squirrel eating a burger"""
A_ : Tuple = torch.manual_seed(0 )
A_ : Dict = sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
A_ : Optional[int] = output.images
A_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : Tuple = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ) -> str:
A_ : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
A_ : List[Any] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler("""sample_euler""" )
A_ : Optional[int] = """A painting of a squirrel eating a burger"""
A_ : int = torch.manual_seed(0 )
A_ : Dict = sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
A_ : int = output.images
A_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : int = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def UpperCAmelCase_ ( self ) -> int:
A_ : str = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
A_ : Union[str, Any] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
A_ : Dict = """A painting of a squirrel eating a burger"""
A_ : Optional[int] = torch.manual_seed(0 )
A_ : Optional[Any] = sd_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=_lowerCamelCase , )
A_ : Optional[Any] = output.images
A_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A_ : Any = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 344 |
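The k-diffusion tests above reduce to the following hedged usage sketch; it needs a CUDA device plus the diffusers and k-diffusion packages, and the weights download on first use.

# Hedged usage sketch for StableDiffusionKDiffusionPipeline, mirroring the tests.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")
pipe.set_scheduler("sample_euler")  # select a k-diffusion sampler by name

generator = torch.manual_seed(0)
output = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    guidance_scale=9.0,
    num_inference_steps=20,
    output_type="np",
)
print(output.images[0].shape)  # (512, 512, 3)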
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''vision-encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> str:
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                F"A configuration of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
A_ : Optional[int] = kwargs.pop("""encoder""" )
A_ : List[str] = encoder_config.pop("""model_type""" )
A_ : str = kwargs.pop("""decoder""" )
A_ : Optional[Any] = decoder_config.pop("""model_type""" )
A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : str = self.__class__.model_type
return output
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
A_ : Optional[int] = OrderedDict()
A_ : List[Any] = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
A_ , A_ : str = dummy_input["""input_ids"""].shape
A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
A_ : Union[str, Any] = dummy_input.pop("""input_ids""" )
A_ : List[str] = dummy_input.pop("""attention_mask""" )
A_ : Optional[int] = torch.zeros(_lowerCamelCase )
return common_inputs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
| 344 | 1 |
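The config class above refuses to instantiate without both sub-configs; the supported path is the classmethod, as in this hedged sketch:

# Hedged sketch: composing a VisionEncoderDecoderConfig from two sub-configs.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
    encoder_config=ViTConfig(),
    decoder_config=BertConfig(),  # gets is_decoder=True and add_cross_attention=True
)
print(config.model_type)  # "vision-encoder-decoder"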
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def UpperCAmelCase ( a_ ) -> Callable:
"""simple docstring"""
@wraps(a_ )
def _inner_fn(*a_ , **a_ ):
warnings.warn(
(F"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , a_ , )
return fn(*a_ , **a_ )
return _inner_fn
| 344 |
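The decorator above wraps a function so that each call emits a warning before delegating to the original. A self-contained sketch under a readable name, with UserWarning assumed as the warning category:

# Readable sketch of the experimental-warning decorator above.
import warnings
from functools import wraps

def experimental(fn):
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking "
            "changes in the future.",
            UserWarning,  # assumed category; the sample passes an obfuscated name here
        )
        return fn(*args, **kwargs)
    return _inner_fn

@experimental
def new_feature(x):
    return x * 2

new_feature(3)  # warns once, then returns 6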
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase__ : Any = '\\n Text data.\n Second line of data.'
UpperCamelCase__ : List[Any] = 'file'
@pytest.fixture(scope="""session""" )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : int = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
A_ : int = bytes(a_ , """utf-8""" )
with zstd.open(a_ , """wb""" ) as f:
f.write(a_ )
return path
@pytest.fixture
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , a_ ) , """w""" ) as f:
f.write(a_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : List[str] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A_ : Any = input_paths[compression_format]
A_ : Tuple = tmp_path / """cache"""
A_ : Tuple = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ )
A_ : Dict = cached_path(a_ , download_config=a_ )
with open(a_ ) as f:
A_ : Optional[Any] = f.read()
with open(a_ ) as f:
A_ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Union[str, Any] = """custom_cache"""
A_ : List[str] = """custom_extracted_dir"""
A_ : Optional[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a_ ) )
A_ : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ : List[Any] = xz_file
A_ : Optional[int] = (
DownloadConfig(extract_compressed_file=a_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ )
)
A_ : Union[str, Any] = cached_path(a_ , download_config=a_ )
assert Path(a_ ).parent.parts[-2:] == expected
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : str = str(Path(a_ ).resolve() )
assert cached_path(a_ ) == text_file
# relative path
A_ : List[str] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(a_ ) == text_file
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(a_ ):
cached_path(a_ )
# relative path
A_ : Tuple = """./__missing_file__.txt"""
with pytest.raises(a_ ):
cached_path(a_ )
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
A_ : Any = get_from_cache(F"tmp://{tmpfs_file}" )
with open(a_ ) as f:
A_ : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with pytest.raises(a_ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
http_get("""https://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
ftp_get("""ftp://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
fsspec_get("""s3://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
fsspec_head("""s3://huggingface.co""" )
| 344 | 1 |
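The tests above boil down to one call pattern: cached_path copies (or downloads) a file into the cache and, when extract_compressed_file is set, returns the path of the decompressed payload. A hedged sketch, assuming a local archive named file.txt.zstd exists and the zstandard package is installed:

# Hedged sketch of cached_path with on-the-fly extraction.
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

config = DownloadConfig(cache_dir="./hf_cache", extract_compressed_file=True)
path = cached_path("file.txt.zstd", download_config=config)  # hypothetical archive
with open(path) as f:  # `path` points at the extracted text, not the archive
    print(f.read())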
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = 42
@flax_register_to_config
class _lowerCAmelCase ( nn.Module, __A, __A ):
"""simple docstring"""
lowerCamelCase = 32
lowerCamelCase = 4
lowerCamelCase = 4
lowerCamelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
lowerCamelCase = False
lowerCamelCase = (320, 640, 1280, 1280)
lowerCamelCase = 2
lowerCamelCase = 8
lowerCamelCase = None
lowerCamelCase = 1280
lowerCamelCase = 0.0
lowerCamelCase = False
lowerCamelCase = jnp.floataa
lowerCamelCase = True
lowerCamelCase = 0
lowerCamelCase = False
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> FrozenDict:
# init input tensors
A_ : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
A_ : Optional[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
A_ : str = jnp.ones((1,) , dtype=jnp.intaa )
A_ : Dict = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A_ , A_ : int = jax.random.split(_lowerCamelCase )
A_ : Dict = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : str = self.block_out_channels
A_ : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A_ : List[str] = self.num_attention_heads or self.attention_head_dim
# input
A_ : Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A_ : Dict = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A_ : Optional[int] = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
A_ : str = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : Any = (num_attention_heads,) * len(self.down_block_types )
# down
A_ : List[Any] = []
A_ : str = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
A_ : int = output_channel
A_ : Tuple = block_out_channels[i]
A_ : str = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A_ : Dict = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A_ : List[Any] = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
A_ : Any = down_blocks
# mid
A_ : str = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
A_ : Any = []
A_ : str = list(reversed(_lowerCamelCase ) )
A_ : int = list(reversed(_lowerCamelCase ) )
A_ : Dict = list(reversed(_lowerCamelCase ) )
A_ : Union[str, Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
A_ : List[str] = output_channel
A_ : List[Any] = reversed_block_out_channels[i]
A_ : List[Any] = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
A_ : List[Any] = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
A_ : int = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A_ : Dict = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
A_ : str = output_channel
A_ : str = up_blocks
# out
A_ : List[Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
A_ : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
A_ : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
A_ : Dict = timesteps.astype(dtype=jnp.floataa )
A_ : str = jnp.expand_dims(_lowerCamelCase , 0 )
A_ : List[str] = self.time_proj(_lowerCamelCase )
A_ : List[Any] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
A_ : str = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
A_ : Dict = self.conv_in(_lowerCamelCase )
# 3. down
A_ : Optional[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ , A_ : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
A_ , A_ : List[str] = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
A_ : Optional[Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
A_ : List[str] = new_down_block_res_samples
# 4. mid
A_ : Union[str, Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
A_ : Dict = down_block_res_samples[-(self.layers_per_block + 1) :]
A_ : str = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
A_ : int = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
A_ : int = self.conv_norm_out(_lowerCamelCase )
A_ : Optional[int] = nn.silu(_lowerCamelCase )
A_ : Tuple = self.conv_out(_lowerCamelCase )
A_ : Dict = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
| 344 |
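A hedged sketch of initializing the Flax UNet above: init_weights builds dummy sample, timestep and encoder-hidden-state tensors and returns the parameter tree. It assumes jax, flax and diffusers are installed; the exact call signature is an assumption based on the init_weights method shown above.

# Hedged initialization sketch for the Flax UNet defined above.
import jax
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4)
params = unet.init_weights(jax.random.PRNGKey(0))
print(list(params.keys()))  # top-level parameter groups (conv_in, down_blocks, ...)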
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
import sys


def matrix_chain_order(array):
    """simple docstring"""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    """simple docstring"""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operations required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
| 344 |
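A tiny worked check of the dynamic program above (using the function names as restored): with dimensions [10, 20, 30], i.e. a 10x20 matrix times a 20x30 matrix, the only parenthesization costs 10 * 20 * 30 = 6000 scalar multiplications.

# Worked check for matrix_chain_order as restored above.
dims = [10, 20, 30]
m, s = matrix_chain_order(dims)
print(m[1][len(dims) - 1])  # 6000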
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''distilbert'''
lowerCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=512 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=768 , _lowerCamelCase=4 * 768 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ) -> Optional[Any]:
A_ : Tuple = vocab_size
A_ : List[Any] = max_position_embeddings
A_ : int = sinusoidal_pos_embds
A_ : int = n_layers
A_ : str = n_heads
A_ : Optional[int] = dim
A_ : int = hidden_dim
A_ : Tuple = dropout
A_ : List[Any] = attention_dropout
A_ : int = activation
A_ : Dict = initializer_range
A_ : List[Any] = qa_dropout
A_ : int = seq_classif_dropout
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 344 | 1 |
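The attribute_map in the config above aliases the standard transformers names onto DistilBERT's own; a hedged sketch of reading through it:

# Hedged sketch: DistilBertConfig defaults read through attribute_map.
from transformers import DistilBertConfig

config = DistilBertConfig()
print(config.model_type)         # "distilbert"
print(config.hidden_size)        # 768, aliased to config.dim
print(config.num_hidden_layers)  # 6, aliased to config.n_layers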
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
UpperCamelCase__ : Union[str, Any] = parser.parse_args()
UpperCamelCase__ : Any = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 344 |
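The CLI above is a thin wrapper around one library call; a hedged programmatic equivalent with only a subset of the flags (the checkpoint filename is hypothetical):

# Hedged sketch: calling the converter directly instead of through argparse.
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.ckpt",  # hypothetical local checkpoint
    scheduler_type="pndm",
    extract_ema=True,
)
pipe.save_pretrained("./converted-sd-pipeline", safe_serialization=True)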
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = state_dict.pop(a_ )
A_ : Tuple = val
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A_ : str = value
else:
A_ : int = value
return new_state_dict
def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]:
"""simple docstring"""
A_ : List[Any] = """"""
if is_panoptic:
A_ : Any = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[Any] = in_proj_weight[:2_5_6, :]
A_ : Tuple = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : int = in_proj_bias[2_5_6:5_1_2]
A_ : int = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : List[Any] = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
A_ : str = {int(a_ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 344 | 1 |
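The conversion above leans on one small move, repeated hundreds of times: pop a tensor out of the state dict under its old key and reinsert it under the new one. A standalone sketch of that pattern:

# Standalone sketch of the rename_key pattern used throughout the conversion.
from collections import OrderedDict

def rename_key(state_dict, old, new):
    state_dict[new] = state_dict.pop(old)

sd = OrderedDict({"transformer.decoder.norm.weight": 1.0})
rename_key(sd, "transformer.decoder.norm.weight", "decoder.layernorm.weight")
print(sd)  # OrderedDict([('decoder.layernorm.weight', 1.0)])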
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 344 |
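fire.Fire maps command-line arguments onto the function's parameters, so the script above doubles as a library function. A hedged usage sketch (the file names are hypothetical and the utils helpers from the sample are assumed importable):

# Hedged sketch: driving calculate_rouge_path (as restored above) in code.
metrics = calculate_rouge_path(
    "predictions.txt",  # hypothetical file of generated summaries, one per line
    "references.txt",   # hypothetical file of gold summaries, one per line
    save_path="rouge_scores.json",
)
print(metrics)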
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = (UnCLIPScheduler,)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]:
A_ : Union[str, Any] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
A_ : Dict = scheduler_class(**_lowerCamelCase )
A_ : Dict = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Any = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[Any] = pred_prev_sample
A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(25 )
A_ : List[str] = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
if i + 1 == timesteps.shape[0]:
A_ : List[str] = None
else:
A_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Optional[Any] = pred_prev_sample
A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
| 344 | 1 |
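The scheduler tests above implement the standard denoising loop; here is a hedged standalone sketch with a random tensor standing in for a real UNet's noise prediction.

# Hedged denoising-loop sketch for UnCLIPScheduler.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)

sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a real model call
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 32, 32])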
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Optional[Any]:
debug_launcher(test_script.main )
def UpperCAmelCase_ ( self ) -> Dict:
debug_launcher(test_ops.main )
| 344 |
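debug_launcher emulates a small multi-process launch on CPU, which is what both tests above rely on; a hedged minimal use:

# Hedged sketch of accelerate's debug_launcher.
from accelerate import debug_launcher

def training_function():
    print("hello from an emulated worker")

debug_launcher(training_function, num_processes=2)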
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=False , ) -> Optional[int]:
A_ : Union[str, Any] = size if size is not None else {"""height""": 20, """width""": 20}
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ : Optional[Any] = parent
A_ : Optional[int] = batch_size
A_ : Union[str, Any] = num_channels
A_ : str = image_size
A_ : Tuple = min_resolution
A_ : Dict = max_resolution
A_ : str = do_resize
A_ : Tuple = size
A_ : int = do_center_crop
A_ : Dict = crop_size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : Optional[Any] = image_std
A_ : Any = do_reduce_labels
def UpperCAmelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
A_ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(dataset[0]["""file"""] )
A_ : Dict = Image.open(dataset[1]["""file"""] )
return image, map
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
A_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(ds[0]["""file"""] )
A_ : List[Any] = Image.open(ds[1]["""file"""] )
A_ : Any = Image.open(ds[2]["""file"""] )
A_ : str = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BeitImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : List[Any] = BeitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
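# The reduce-labels behavior the last test exercises can be sketched without the
# processor itself: background id 0 maps to the ignore index 255 and every other
# class id shifts down by one, so ADE20K's 150 classes land in [0, 149].
seg = np.array([[0, 1], [2, 150]], dtype=np.int64)
reduced = seg - 1
reduced[seg == 0] = 255
print(reduced)  # [[255   0] [  1 149]]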
| 344 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCamelCase__ : int = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
'''simple docstring'''
import os
from typing import Any, Dict, List, Optional, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> str:
super().__init__()
A_ : Optional[Any] = pad_token_id
A_ : List[Any] = max_length
A_ : str = vocab
A_ : Union[str, Any] = merges
A_ : List[Any] = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> int:
A_ : Tuple = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
A_ : Dict = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> str:
A_ : Tuple = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> List[Any]:
return cls(**_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Any:
A_ : List[Any] = self.tf_tokenizer(_lowerCamelCase )
A_ : Any = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
A_ : List[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
A_ , A_ : Tuple = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
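# Minimal usage sketch for the layer above (in transformers this class ships as
# TFGPT2Tokenizer; the stock "gpt2" checkpoint name is assumed here):
layer = _lowerCAmelCase.from_pretrained("gpt2")
outputs = layer(tf.constant(["hello world"]))
print(outputs["input_ids"], outputs["attention_mask"])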
| 344 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''beit'''
def __init__( self , _lowerCamelCase=8192 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=224 , _lowerCamelCase=16 , _lowerCamelCase=3 , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=True , _lowerCamelCase=[3, 5, 7, 11] , _lowerCamelCase=[1, 2, 3, 6] , _lowerCamelCase=True , _lowerCamelCase=0.4 , _lowerCamelCase=256 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=255 , **_lowerCamelCase , ) -> Any:
super().__init__(**_lowerCamelCase )
A_ : List[str] = vocab_size
A_ : Any = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : Tuple = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : int = initializer_range
A_ : int = layer_norm_eps
A_ : Any = image_size
A_ : Union[str, Any] = patch_size
A_ : Optional[int] = num_channels
A_ : Tuple = use_mask_token
A_ : int = use_absolute_position_embeddings
A_ : str = use_relative_position_bias
A_ : Union[str, Any] = use_shared_relative_position_bias
A_ : List[Any] = layer_scale_init_value
A_ : Any = drop_path_rate
A_ : List[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Optional[int] = out_indices
A_ : Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Tuple = use_auxiliary_head
A_ : int = auxiliary_loss_weight
A_ : int = auxiliary_channels
A_ : Dict = auxiliary_num_convs
A_ : Any = auxiliary_concat_input
A_ : Any = semantic_loss_ignore_index
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCamelCase = None
class _lowerCAmelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCamelCase = PandasConfig
def UpperCAmelCase_ ( self ) -> Tuple:
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
A_ : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
A_ : List[str] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A_ : List[str] = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A_ : Dict = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={"""files""": files} ) )
return splits
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Optional[int]:
for i, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
with open(_lowerCamelCase , """rb""" ) as f:
A_ : Union[str, Any] = pa.Table.from_pandas(pd.read_pickle(_lowerCamelCase ) )
yield i, self._cast_table(_lowerCamelCase )
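# Per file, _generate_tables reduces to a pickle-to-Arrow round trip; a
# self-contained sketch (the temporary path is illustrative):
import os
import tempfile

sample_path = os.path.join(tempfile.gettempdir(), "sample.pkl")
pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle(sample_path)
print(pa.Table.from_pandas(pd.read_pickle(sample_path)).schema)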
| 344 |
'''simple docstring'''
class Things:
    """simple docstring"""
    def __init__( self , name , value , weight ) -> None:
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ) -> str:
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu( name , value , weight ) -> list:
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( items , max_cost , key_func ) -> tuple:
    """simple docstring"""
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy( ) -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
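# Usage sketch for the helpers above (menu data invented for illustration):
# greedy orders items by their value/weight ratio, then fills a weight budget of 60.
food = ["Burger", "Pizza", "Coke", "Apple"]
value = [80, 100, 60, 40]
weight = [40, 60, 40, 10]
menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 60, Things.value_weight)
print(chosen, total_value)  # [Things(Apple, 40, 10), Things(Burger, 80, 40)] 120.0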
| 344 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : str = '▁'
UpperCamelCase__ : Union[str, Any] = {'vocab_file': 'spiece.model'}
UpperCamelCase__ : Optional[Any] = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
UpperCamelCase__ : int = {
'google/reformer-crime-and-punishment': 524_288,
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCamelCase , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase=[] , _lowerCamelCase = None , **_lowerCamelCase , ) -> None:
A_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : str = vocab_file
A_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase_ ( self ) -> Dict[str, int]:
A_ : Optional[int] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
A_ : Tuple = self.__dict__.copy()
A_ : str = None
return state
def __setstate__( self , _lowerCamelCase ) -> Optional[int]:
A_ : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ : List[Any] = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Optional[int]:
return self.sp_model.piece_to_id(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Any:
if index < self.sp_model.get_piece_size():
A_ : List[Any] = self.sp_model.IdToPiece(_lowerCamelCase )
return token
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Tuple:
A_ : List[Any] = []
A_ : int = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
A_ : List[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A_ : List[Any] = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
A_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
| 344 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase ( inductance , capacitance ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
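# Worked example (component values chosen purely for illustration): with
# L = 10 mH and C = 100 nF, f = 1 / (2 * pi * sqrt(L * C)) ≈ 5032.9 Hz.
label, frequency = UpperCAmelCase(10e-3, 100e-9)
print(label, round(frequency, 1))  # Resonant frequency 5032.9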
| 344 | 1 |
'''simple docstring'''
import argparse
from typing import Any, List, Optional
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
A_ : Union[str, Any] = 3_8_4
if "tiny" in model_name:
A_ : Union[str, Any] = [3, 3, 9, 3]
A_ : int = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
A_ : List[str] = [3, 3, 2_7, 3]
A_ : str = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
A_ : str = [3, 3, 2_7, 3]
A_ : List[Any] = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
A_ : Optional[Any] = 5_1_2
if "large" in model_name:
A_ : int = [3, 3, 2_7, 3]
A_ : int = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
A_ : Union[str, Any] = 7_6_8
if "xlarge" in model_name:
A_ : str = [3, 3, 2_7, 3]
A_ : Optional[Any] = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
A_ : Union[str, Any] = 1_0_2_4
# set label information
A_ : Tuple = 1_5_0
A_ : List[Any] = """huggingface/label-files"""
A_ : Tuple = """ade20k-id2label.json"""
A_ : Union[str, Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
A_ : Optional[Any] = {int(a_ ): v for k, v in idalabel.items()}
A_ : List[str] = {v: k for k, v in idalabel.items()}
A_ : List[Any] = ConvNextConfig(
depths=a_ , hidden_sizes=a_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
A_ : Any = UperNetConfig(
backbone_config=a_ , auxiliary_in_channels=a_ , num_labels=a_ , idalabel=a_ , labelaid=a_ , )
return config
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : str = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Tuple = dct.pop(a_ )
A_ : Dict = val
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
A_ : List[Any] = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
A_ : Dict = model_name_to_url[model_name]
A_ : str = torch.hub.load_state_dict_from_url(a_ , map_location="""cpu""" )["""state_dict"""]
A_ : List[str] = get_upernet_config(a_ )
A_ : Union[str, Any] = UperNetForSemanticSegmentation(a_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A_ : Tuple = state_dict.pop(a_ )
if "bn" in key:
A_ : List[Any] = key.replace("""bn""" , """batch_norm""" )
A_ : int = val
# rename keys
A_ : int = create_rename_keys(a_ )
for src, dest in rename_keys:
rename_key(a_ , a_ , a_ )
model.load_state_dict(a_ )
# verify on image
A_ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
A_ : Union[str, Any] = Image.open(requests.get(a_ , stream=a_ ).raw ).convert("""RGB""" )
A_ : Union[str, Any] = SegformerImageProcessor()
A_ : List[str] = processor(a_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
A_ : List[str] = model(a_ )
if model_name == "upernet-convnext-tiny":
A_ : Union[str, Any] = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
A_ : Union[str, Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
A_ : int = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
A_ : str = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
A_ : Optional[int] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , a_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a_ )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(a_ )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
UpperCamelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCamelCase__ : Any = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
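# Applying the (src, dest) pairs from the rename table is a plain pop-and-reinsert
# over the state dict; a toy sketch mirroring the rename helper above:
state = {"backbone.norm0.weight": torch.ones(3)}
for src, dest in [("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")]:
    state[dest] = state.pop(src)
print(list(state))  # ['backbone.hidden_states_norms.stage1.weight']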
| 344 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from typing import Any, Dict, List, Optional, Tuple, Union
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any:
A_ : List[Any] = parent
A_ : int = config_class
A_ : int = has_text_modality
A_ : str = kwargs
A_ : int = common_properties
def UpperCAmelCase_ ( self ) -> str:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : Optional[int] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCamelCase ):
try:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCamelCase ):
try:
A_ : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = self.config_class(**self.inputs_dict )
A_ : Optional[int] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" )
config_first.to_json_file(_lowerCamelCase )
A_ : Dict = self.config_class.from_json_file(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : List[Any] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
config_first.save_pretrained(_lowerCamelCase )
A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
A_ : str = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_class.is_composition:
return
A_ : Dict = self.config_class()
self.parent.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = copy.deepcopy(_lowerCamelCase )
A_ : Tuple = self.config_class(**_lowerCamelCase )
A_ : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCamelCase , _lowerCamelCase ) != value:
wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) )
if len(_lowerCamelCase ) > 0:
A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
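# The JSON round trip these helpers verify can be reproduced with any concrete
# config; a sketch using BertConfig (picked purely for illustration):
from transformers import BertConfig

cfg = BertConfig(num_labels=5)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "config.json")
    cfg.to_json_file(path)
    loaded = BertConfig.from_json_file(path)
assert loaded.to_dict() == cfg.to_dict()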
| 344 | 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Dict = False, False, False
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = None
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = None
# Automatically constructed
lowerCamelCase = "dict"
lowerCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCamelCase = field(default='''Audio''', init=__A, repr=__A )
def __call__( self ) -> Union[str, Any]:
return self.pa_type
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(_lowerCamelCase , _lowerCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
A_ : Any = BytesIO()
sf.write(_lowerCamelCase , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
A_ : List[str] = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_2767
else:
A_ : Optional[int] = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_2767
A_ : int = BytesIO(bytes() )
sf.write(_lowerCamelCase , _lowerCamelCase , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
A_ , A_ : Union[str, Any] = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
A_ : List[str] = xsplitext(_lowerCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
A_ : Optional[int] = token_per_repo_id or {}
A_ : Tuple = path.split("""::""" )[-1]
try:
A_ : Optional[Any] = string_to_dict(_lowerCamelCase , config.HUB_DATASETS_URL )["""repo_id"""]
A_ : str = token_per_repo_id[repo_id]
except (ValueError, KeyError):
A_ : Any = None
with xopen(_lowerCamelCase , """rb""" , use_auth_token=_lowerCamelCase ) as f:
A_ , A_ : str = sf.read(_lowerCamelCase )
else:
A_ , A_ : Optional[Any] = sf.read(_lowerCamelCase )
A_ : List[Any] = array.T
if self.mono:
A_ : int = librosa.to_mono(_lowerCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
A_ : List[str] = librosa.resample(_lowerCamelCase , orig_sr=_lowerCamelCase , target_sr=self.sampling_rate )
A_ : Any = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def UpperCAmelCase_ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
A_ : Any = pa.array([None] * len(_lowerCamelCase ) , type=pa.binary() )
A_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A_ : List[str] = pa.array([None] * len(_lowerCamelCase ) , type=pa.string() )
A_ : Optional[int] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
A_ : List[str] = pa.array([Audio().encode_example(_lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
A_ : Optional[Any] = storage.field("""bytes""" )
else:
A_ : Dict = pa.array([None] * len(_lowerCamelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
A_ : List[str] = storage.field("""path""" )
else:
A_ : Tuple = pa.array([None] * len(_lowerCamelCase ) , type=pa.string() )
A_ : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_lowerCamelCase , self.pa_type )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowerCamelCase ):
with xopen(_lowerCamelCase , """rb""" ) as f:
A_ : Optional[int] = f.read()
return bytes_
A_ : Dict = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A_ : Tuple = pa.array(
[os.path.basename(_lowerCamelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
A_ : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCamelCase , self.pa_type )
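# What the encode path produces for an {"array", "sampling_rate"} sample, sketched
# standalone (synthetic 440 Hz tone; requires the soundfile package):
from io import BytesIO

import numpy as np
import soundfile as sf

sr = 16000
tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)
buffer = BytesIO()
sf.write(buffer, tone, sr, format="wav")
encoded = {"bytes": buffer.getvalue(), "path": None}
print(len(encoded["bytes"]))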
| 344 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
try:
with open(a_ , """rb""" ) as flax_state_f:
A_ : Tuple = from_bytes(a_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a_ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(a_ , a_ )
def UpperCAmelCase ( a_ , a_ ) -> Any:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    A_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , a_ ) ).values()
if any(a_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
    A_ : str = jax.tree_util.tree_map(
        lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a_ )
A_ : Any = """"""
A_ : Optional[int] = flatten_dict(a_ , sep=""".""" )
A_ : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
A_ : Union[str, Any] = []
A_ : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A_ : List[Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[Any] = jnp.transpose(a_ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
A_ : int = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(a_ ):
A_ : Tuple = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
A_ : Dict = """.""".join(a_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
A_ : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ , np.ndarray ) else flax_tensor
A_ : Tuple = torch.from_numpy(a_ )
# remove from missing keys
missing_keys.remove(a_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(a_ )
pt_model.load_state_dict(a_ )
# re-transform missing_keys to list
A_ : Dict = list(a_ )
if len(a_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(a_ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
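# The two weight-layout conversions above, in toy form: Flax dense kernels are
# (in, out) and transpose to PyTorch's (out, in); Flax conv kernels are
# (H, W, in, out) and permute to (out, in, H, W).
dense_kernel = np.zeros((768, 3072))
print(dense_kernel.T.shape)  # (3072, 768)
conv_kernel = np.zeros((3, 3, 16, 32))
print(np.transpose(conv_kernel, (3, 2, 0, 1)).shape)  # (32, 16, 3, 3)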
| 344 | 1 |
'''simple docstring'''
def UpperCAmelCase ( limit = 1_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
    print(UpperCAmelCase())
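# Quick sanity check on a small limit: summing phi(d) for d = 2..8 gives
# 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, the standard count of reduced proper
# fractions with denominator at most 8.
assert UpperCAmelCase(8) == 21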
| 344 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Dict = TFRegNetModel(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Optional[Any] = self.num_labels
A_ : int = TFRegNetForImageClassification(_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Any = self.prepare_config_and_inputs()
A_ , A_ , A_ : str = config_and_inputs
A_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
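# A minimal sketch of the inference pattern exercised by the integration test
# above. The checkpoint name is an assumption (the first entry of
# TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, e.g. "facebook/regnet-y-040"):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=prepare_img(), return_tensors="tf")
#     logits = model(**inputs, training=False).logits  # shape (1, 1000), ImageNet classes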
| 344 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
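# Worked example: for x**2 - 3*x + 2 = 0, delta = 9 - 8 = 1, so the roots are
# (3 + 1) / 2 = 2.0 and (3 - 1) / 2 = 1.0:
#     quadratic_roots(a=1, b=-3, c=2)  # -> (2.0, 1.0)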
def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
| 344 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
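# Worked example: for n = 10 the sum of the squares is 385 and the square of
# the sum is 55**2 = 3025, so solution(10) returns 3025 - 385 = 2640.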
if __name__ == "__main__":
print(f'{solution() = }')
| 344 | 1 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
UpperCamelCase__ : Optional[Any] = get_tests_dir('fixtures/dummy-config.json')
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Any:
A_ : List[str] = 0
def UpperCAmelCase_ ( self ) -> List[Any]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[Any] = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Dict = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : List[Any] = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A_ : Tuple = os.path.join(_lowerCamelCase , """fake-roberta""" )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
A_ : Any = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(type(_lowerCamelCase ) , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
try:
AutoConfig.register("""custom""" , _lowerCamelCase )
# Wrong model type will raise an error
with self.assertRaises(_lowerCamelCase ):
AutoConfig.register("""model""" , _lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCamelCase ):
AutoConfig.register("""bert""" , _lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
A_ : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase )
A_ : int = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def UpperCAmelCase_ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_lowerCamelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ : List[str] = AutoConfig.from_pretrained("""bert-base""" )
def UpperCAmelCase_ ( self ) -> int:
with self.assertRaisesRegex(
_lowerCamelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Dict = AutoConfig.from_pretrained(_lowerCamelCase , revision="""aaaaaa""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
_lowerCamelCase , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
A_ : Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCamelCase ):
A_ : Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCamelCase ):
A_ : Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_lowerCamelCase )
A_ : Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_lowerCamelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase )
A_ : Tuple = AutoConfig.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''new-model'''
try:
AutoConfig.register("""new-model""" , _lowerCamelCase )
# If remote code is not set, the default is to use local
A_ : Dict = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
A_ : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_lowerCamelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
A_ : int = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_lowerCamelCase )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
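# A minimal sketch of the custom-config registration flow exercised above
# (names mirror the test fixtures; tmp_dir is illustrative):
#     AutoConfig.register("custom", CustomConfig)
#     config = CustomConfig()
#     config.save_pretrained(tmp_dir)
#     reloaded = AutoConfig.from_pretrained(tmp_dir)  # resolves back to CustomConfig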
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCamelCase__ : int = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """simple docstring"""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """simple docstring"""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail (binary search step)
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
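# Worked example: for v = [10, 9, 2, 5, 3, 7, 101, 18] the longest strictly
# increasing subsequence is [2, 3, 7, 18] (or [2, 3, 7, 101]), so
# longest_increasing_subsequence_length(v) returns 4.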
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list,
        dtype=torch.int32,
        device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list,
        dtype=torch.float32,
        device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
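# A minimal usage sketch (shapes are assumptions: "aatype" holds one amino-acid
# index in [0, 20] per residue):
#     protein = {"aatype": torch.zeros(num_res, dtype=torch.long)}
#     protein = make_atom14_masks(protein)
#     protein["residx_atom14_to_atom37"]  # (num_res, 14) gather indices into atom37
#     protein["atom14_atom_exists"]       # (num_res, 14) mask of real atom14 slots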
| 344 | 1 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def UpperCAmelCase_ ( self ) -> List[Any]:
import torch
A_ : Optional[Any] = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
A_ : Tuple = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
A_ : int = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase_ ( self ) -> int:
import torch
A_ : Union[str, Any] = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
A_ : Any = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
A_ : List[Any] = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
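# A minimal sketch of the tokenization pattern exercised above (metadata
# mirrors self.metas; the output structure is inferred from the assertions):
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     input_ids = tokenizer(**self.metas)["input_ids"]
#     # input_ids is a list of three tensors; in this test only the first one
#     # carries the full lyric token sequence.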
| 344 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
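        # Worked example with the defaults above: image_size=30 and patch_size=2
        # give (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227.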
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = DeiTModelTester(self )
A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]:
A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : List[str] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : Any = False
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A_ : List[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Union[str, Any] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
A_ : Dict = problem_type["""title"""]
A_ : List[Any] = problem_type["""num_labels"""]
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
A_ : List[str] = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
A_ : Optional[int] = self.default_image_processor
A_ : str = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Any = model(**_lowerCamelCase )
# verify the logits
A_ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : List[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
A_ : Union[str, Any] = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : List[Any] = model(_lowerCamelCase )
| 344 | 1 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
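# Sanity check for the toy model above: an input of shape (batch, 3) flows
# through Linear(3, 4) -> BatchNorm1d(4) -> Linear(4, 5), so
# ModelForTest()(torch.randn(2, 3)) returns a tensor of shape (2, 5).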
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Union[str, Any] = ModelForTest()
A_ : Optional[Any] = ModelHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(test_model._hf_hook , _lowerCamelCase )
self.assertTrue(hasattr(_lowerCamelCase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(_lowerCamelCase )
self.assertFalse(hasattr(_lowerCamelCase , """_hf_hook""" ) )
self.assertFalse(hasattr(_lowerCamelCase , """_old_forward""" ) )
def UpperCAmelCase_ ( self ) -> Any:
A_ : int = ModelForTest()
A_ : int = ModelHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
add_hook_to_module(_lowerCamelCase , _lowerCamelCase , append=_lowerCamelCase )
self.assertEqual(isinstance(test_model._hf_hook , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(_lowerCamelCase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(_lowerCamelCase )
self.assertFalse(hasattr(_lowerCamelCase , """_hf_hook""" ) )
self.assertFalse(hasattr(_lowerCamelCase , """_old_forward""" ) )
def UpperCAmelCase_ ( self ) -> str:
A_ : List[Any] = ModelForTest()
A_ : List[Any] = torch.randn(2 , 3 )
A_ : Optional[int] = test_model(x + 1 )
A_ : Dict = test_model(x + 2 )
A_ : int = PreForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A_ : str = PreForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A_ : Tuple = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = test_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = ModelForTest()
A_ : Optional[Any] = torch.randn(2 , 3 )
A_ : int = test_model(_lowerCamelCase )
A_ : List[Any] = PostForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A_ : Dict = PostForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
A_ : Any = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A_ : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = test_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , output + 2 , atol=1e-5 )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = ModelForTest()
A_ : Tuple = torch.randn(2 , 3 )
A_ : Optional[Any] = test_model(_lowerCamelCase )
A_ : Optional[Any] = PostForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
A_ : Any = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
A_ : Dict = True
A_ : Optional[Any] = test_model(_lowerCamelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
A_ : Dict = torch.randn(2 , 3 )
A_ : List[Any] = model(_lowerCamelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowerCamelCase , AlignDevicesHook(io_same_device=_lowerCamelCase ) )
A_ : int = torch.randn(2 , 3 ).to(0 )
A_ : Optional[Any] = model(_lowerCamelCase )
self.assertEqual(output.device , torch.device(0 ) )
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
A_ : Dict = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowerCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowerCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowerCamelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A_ : List[str] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
A_ : Any = torch.randn(2 , 3 )
A_ : Optional[int] = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
A_ : List[Any] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowerCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowerCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowerCamelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
A_ : List[Any] = torch.randn(2 , 3 )
A_ : int = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Optional[int] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
A_ : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A_ : Optional[Any] = torch.device(_lowerCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
A_ : Tuple = torch.randn(2 , 3 )
A_ : Any = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , offload_buffers=_lowerCamelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
A_ : Tuple = torch.randn(2 , 3 )
A_ : Optional[int] = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
A_ : List[Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A_ : Optional[Any] = torch.device(_lowerCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
A_ : Tuple = torch.randn(2 , 3 )
A_ : int = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , weights_map=model.state_dict() , offload_buffers=_lowerCamelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
A_ : Tuple = torch.randn(2 , 3 )
A_ : List[Any] = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 344 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to
        BridgeTowerImageProcessor, assuming do_resize is set to True with a
        scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
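# Worked example for get_expected_values: with shortest_edge = 288 and a
# 400x300 (width x height) image, scale = 288 / 300 = 0.96, so (newh, neww) =
# (288, 384); both are already multiples of size_divisor = 32, giving (288, 384).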
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image processor
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image processor
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 1 |
'''simple docstring'''
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
A_ : Union[str, Any] = 0
A_ : str = 0
A_ : Optional[int] = knapsack(a_ , a_ , a_ , a_ , index + 1 )
if weights[index] <= max_weight:
A_ : Optional[Any] = values[index] + knapsack(
a_ , a_ , a_ , max_weight - weights[index] , index + 1 )
return max(a_ , a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
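# A worked example (hypothetical call; the recursion above refers to itself as
# knapsack(weights, values, number_of_items, max_weight, index)):
# knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) -> 13
# (items 0 and 2 fit exactly: weight 1 + 4 = 5, value 5 + 8 = 13)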
| 344 |
'''simple docstring'''
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(a_ ):
for j in range(a_ ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) , end="""\t""" )
else:
print("""INF""" , end="""\t""" )
print()
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
A_ : List[str] = [[float("""inf""" ) for _ in range(a_ )] for _ in range(a_ )]
for i in range(a_ ):
for j in range(a_ ):
A_ : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(a_ ):
# looping through rows of graph array
for i in range(a_ ):
# looping through columns of graph array
for j in range(a_ ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
A_ : List[str] = dist[i][k] + dist[k][j]
_print_dist(a_ , a_ )
return dist, v
if __name__ == "__main__":
UpperCamelCase__ : Tuple = int(input('Enter number of vertices: '))
UpperCamelCase__ : int = int(input('Enter number of edges: '))
UpperCamelCase__ : Dict = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
UpperCamelCase__ : Union[str, Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
UpperCamelCase__ : Union[str, Any] = int(input('Enter source:'))
UpperCamelCase__ : int = int(input('Enter destination:'))
UpperCamelCase__ : Optional[Any] = float(input('Enter weight:'))
UpperCamelCase__ : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
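# A programmatic sketch of the same run (an assumption: building the adjacency
# matrix directly instead of answering the interactive prompts):
# INF = float('inf')
# graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
# dist, _ = floyd_warshall(graph, 3)  # dist[1][2] == 2.0 and dist[2][1] == 1.0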
| 344 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , ) -> Tuple:
A_ : str = size if size is not None else {"""height""": 18, """width""": 18}
A_ : Optional[Any] = parent
A_ : List[Any] = batch_size
A_ : Tuple = num_channels
A_ : List[str] = image_size
A_ : Union[str, Any] = min_resolution
A_ : List[Any] = max_resolution
A_ : int = do_resize
A_ : Optional[Any] = size
A_ : List[str] = apply_ocr
def UpperCAmelCase_ ( self ) -> List[Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[int] = LayoutLMvaImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """apply_ocr""" ) )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
A_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCAmelCase_ ( self ) -> Any:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , _lowerCamelCase )
self.assertIsInstance(encoding.boxes , _lowerCamelCase )
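# with apply_ocr=True (the default) the processor also runs Tesseract and
# returns the recognized words plus their bounding boxes alongside pixel_values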
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[Any]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ : Dict = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
# with apply_OCR = True
A_ : List[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
A_ : List[str] = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ : str = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
A_ : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowerCamelCase )
self.assertListEqual(encoding.boxes , _lowerCamelCase )
# with apply_OCR = False
A_ : Any = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase )
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 344 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : List[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
| 344 | 1 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for a, b in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertAlmostEqual(_lowerCamelCase , _lowerCamelCase , delta=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Dict = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
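# the three calls above accumulate element-wise:
# [1.0, 2.0] + [-2.0, 1.0] + [-1.0, 2.0] = [-2.0, 5.0], asserted further below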
with self.assertRaises(_lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def UpperCAmelCase_ ( self ) -> str:
A_ : Dict = None
ops.enable_eager_execution_internal()
A_ : Tuple = tf.config.list_physical_devices("""CPU""" )
if len(_lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
A_ : str = tf.config.list_logical_devices(device_type="""CPU""" )
A_ : Union[str, Any] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
A_ : Any = GradientAccumulator()
A_ : List[Any] = tf.Variable([4.0, 3.0] )
A_ , A_ : Optional[Any] = create_optimizer(5e-5 , 10 , 5 )
A_ : Tuple = tf.Variable([0.0, 0.0] , trainable=_lowerCamelCase )
def accumulate_on_replica(_lowerCamelCase ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_lowerCamelCase , _lowerCamelCase ):
with strategy.scope():
A_ : Union[str, Any] = strategy.experimental_local_results(_lowerCamelCase )
local_variables[0].assign(_lowerCamelCase )
local_variables[1].assign(_lowerCamelCase )
strategy.run(_lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_lowerCamelCase )
def _check_local_values(_lowerCamelCase , _lowerCamelCase ):
A_ : Dict = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _lowerCamelCase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , _lowerCamelCase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
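# per-replica element-wise sums of the three accumulate calls above:
# replica 0: [1.0 + 3.0 - 2.0, 2.0 - 1.0 + 2.0] = [2.0, 3.0]
# replica 1: [-1.0 - 1.0 + 3.0, 1.0 - 1.0 - 2.0] = [1.0, -2.0]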
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCamelCase__ : List[Any] = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def UpperCAmelCase ( a_=True ) -> List[str]:
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__A ) )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = None
lowerCamelCase = None
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> Any:
with TemporaryDirectory() as tmp_dir:
A_ : Optional[Any] = dataset_module_factory(_lowerCamelCase , cache_dir=_lowerCamelCase )
A_ : List[Any] = import_main_class(dataset_module.module_path , dataset=_lowerCamelCase )
A_ : DatasetBuilder = builder_cls(
cache_dir=_lowerCamelCase , config_name=_lowerCamelCase , hash=dataset_module.hash , )
A_ : str = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=_lowerCamelCase ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
A_ : Union[str, Any] = cached_path(_lowerCamelCase , cache_dir=_lowerCamelCase )
self.assertTrue(os.path.exists(_lowerCamelCase ) )
@pytest.mark.integration
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
A_ : List[str] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
A_ : int = dataset_module_factory("""wikipedia""" , cache_dir=a_ )
A_ : Tuple = import_main_class(dataset_module.module_path )
A_ : DatasetBuilder = builder_cls(
cache_dir=a_ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
A_ : Any = None
builder_instance.download_and_prepare()
A_ : List[str] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : Optional[Any] = dataset_module_factory("""wikipedia""" , cache_dir=a_ )
A_ : List[str] = import_main_class(dataset_module.module_path , dataset=a_ )
A_ : DatasetBuilder = builder_cls(
cache_dir=a_ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
A_ : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a_ , a_ )
assert "train" in ds
assert isinstance(ds["""train"""] , a_ )
assert next(iter(ds["""train"""] ) )
| 344 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''vision-encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> str:
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
A_ : Optional[int] = kwargs.pop("""encoder""" )
A_ : List[str] = encoder_config.pop("""model_type""" )
A_ : str = kwargs.pop("""decoder""" )
A_ : Optional[Any] = decoder_config.pop("""model_type""" )
A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : str = self.__class__.model_type
return output
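# A minimal usage sketch (an assumption: this class mirrors transformers'
# VisionEncoderDecoderConfig and the ViT/GPT-2 configs are importable):
# from transformers import ViTConfig, GPT2Config
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
# assert config.decoder.is_decoder and config.decoder.add_cross_attention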
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
A_ : Optional[int] = OrderedDict()
A_ : List[Any] = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
A_ , A_ : str = dummy_input["""input_ids"""].shape
A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
A_ : Union[str, Any] = dummy_input.pop("""input_ids""" )
A_ : List[str] = dummy_input.pop("""attention_mask""" )
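# build dummy encoder_hidden_states of shape (batch, encoder_sequence,
# encoder_hidden_size) for tracing; the zeros are placeholders only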
A_ : Optional[int] = torch.zeros(_lowerCamelCase )
return common_inputs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
| 344 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def UpperCAmelCase ( a_ ) -> typing.Counter[int]:
"""simple docstring"""
A_ : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(a_ , max_perimeter + 1 ):
A_ : List[str] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(a_ ):
A_ : Any = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
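# e.g. pythagorean_triple(12) == Counter({12: 1}): the 3-4-5 triangle is the
# only integer right triangle with perimeter at most 12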
def UpperCAmelCase ( a_ = 1_0_0_0 ) -> int:
"""simple docstring"""
A_ : Dict = pythagorean_triple(a_ )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'Perimeter {solution()} has maximum solutions')
| 344 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase__ : Any = '\\n Text data.\n Second line of data.'
UpperCamelCase__ : List[Any] = 'file'
@pytest.fixture(scope="""session""" )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : int = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
A_ : int = bytes(a_ , """utf-8""" )
with zstd.open(a_ , """wb""" ) as f:
f.write(a_ )
return path
@pytest.fixture
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , a_ ) , """w""" ) as f:
f.write(a_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : List[str] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A_ : Any = input_paths[compression_format]
A_ : Tuple = tmp_path / """cache"""
A_ : Tuple = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ )
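# extract_compressed_file=True makes cached_path return the path to the
# decompressed copy stored under the cache's "extracted" directory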
A_ : Dict = cached_path(a_ , download_config=a_ )
with open(a_ ) as f:
A_ : Optional[Any] = f.read()
with open(a_ ) as f:
A_ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Union[str, Any] = """custom_cache"""
A_ : List[str] = """custom_extracted_dir"""
A_ : Optional[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a_ ) )
A_ : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ : List[Any] = xz_file
A_ : Optional[int] = (
DownloadConfig(extract_compressed_file=a_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ )
)
A_ : Union[str, Any] = cached_path(a_ , download_config=a_ )
assert Path(a_ ).parent.parts[-2:] == expected
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : str = str(Path(a_ ).resolve() )
assert cached_path(a_ ) == text_file
# relative path
A_ : List[str] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(a_ ) == text_file
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(a_ ):
cached_path(a_ )
# relative path
A_ : Tuple = """./__missing_file__.txt"""
with pytest.raises(a_ ):
cached_path(a_ )
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
A_ : Any = get_from_cache(F"tmp://{tmpfs_file}" )
with open(a_ ) as f:
A_ : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with pytest.raises(a_ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
http_get("""https://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
ftp_get("""ftp://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
fsspec_get("""s3://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
fsspec_head("""s3://huggingface.co""" )
| 344 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : Union[str, Any] = """ylacombe/bark-small"""
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : Any = """en_speaker_1"""
A_ : List[str] = """This is a test string"""
A_ : Optional[int] = """speaker_embeddings_path.json"""
A_ : List[str] = """speaker_embeddings"""
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> int:
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.get_tokenizer()
A_ : int = BarkProcessor(tokenizer=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCAmelCase_ ( self ) -> Any:
A_ : Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
A_ : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Union[str, Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
A_ : Any = 35
A_ : Dict = 2
A_ : int = 8
A_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(_lowerCamelCase ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
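# a Bark history prompt pairs one 1-D semantic prompt with coarse (2 codebooks)
# and fine (8 codebooks) acoustic prompts over the same sequence length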
# test providing already loaded voice_preset
A_ : Optional[Any] = processor(text=self.input_string , voice_preset=_lowerCamelCase )
A_ : Any = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
A_ : List[str] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[int] = processor(text=self.input_string , voice_preset=_lowerCamelCase )
A_ : int = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
A_ : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCAmelCase_ ( self ) -> int:
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = BarkProcessor(tokenizer=_lowerCamelCase )
A_ : List[Any] = processor(text=self.input_string )
A_ : Dict = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> str:
super().__init__()
A_ : Optional[Any] = pad_token_id
A_ : List[Any] = max_length
A_ : str = vocab
A_ : Union[str, Any] = merges
A_ : List[Any] = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> int:
A_ : Tuple = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
A_ : Dict = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> str:
A_ : Tuple = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> List[Any]:
return cls(**_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Any:
A_ : List[Any] = self.tf_tokenizer(_lowerCamelCase )
A_ : Any = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
A_ : List[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
A_ , A_ : Tuple = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''distilbert'''
lowerCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=512 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=768 , _lowerCamelCase=4 * 768 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ) -> Optional[Any]:
A_ : Tuple = vocab_size
A_ : List[Any] = max_position_embeddings
A_ : int = sinusoidal_pos_embds
A_ : int = n_layers
A_ : str = n_heads
A_ : Optional[int] = dim
A_ : int = hidden_dim
A_ : Tuple = dropout
A_ : List[Any] = attention_dropout
A_ : int = activation
A_ : Dict = initializer_range
A_ : List[Any] = qa_dropout
A_ : int = seq_classif_dropout
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
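# A minimal usage sketch (an assumption: the config class above mirrors
# transformers' DistilBertConfig, whose attribute_map aliases the standard names):
# config = DistilBertConfig(n_layers=3, n_heads=6, dim=384, hidden_dim=4 * 384)
# assert config.num_hidden_layers == 3 and config.hidden_size == 384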
| 344 | 1 |
'''simple docstring'''
def UpperCAmelCase ( a_ ) -> list[int]:
"""simple docstring"""
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
A_ : Optional[Any] = [True] * (num + 1)
A_ : Tuple = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
A_ : Optional[Any] = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
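# quick sanity check of the sieve above:
# prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]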
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : List[str] = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 344 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = state_dict.pop(a_ )
A_ : Tuple = val
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A_ : str = value
else:
A_ : int = value
return new_state_dict
def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]:
"""simple docstring"""
A_ : List[Any] = """"""
if is_panoptic:
A_ : Any = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[Any] = in_proj_weight[:2_5_6, :]
A_ : Tuple = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : int = in_proj_bias[2_5_6:5_1_2]
A_ : int = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
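# The slices above assume Conditional DETR's hidden size of 256, so the fused
# in_proj matrix stacks query, key and value as 3 * 256 = 768 rows. A minimal,
# self-contained sketch of the same split for an arbitrary size (the tensor
# shapes here are illustrative, not read from a real checkpoint):
#
#     import torch
#
#     hidden_size = 256
#     in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
#     q_w, k_w, v_w = in_proj_weight.split(hidden_size, dim=0)
#     assert torch.equal(v_w, in_proj_weight[-hidden_size:, :])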
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : List[Any] = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
A_ : str = {int(a_ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
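# Example invocation, assuming this script is saved as convert_conditional_detr.py
# (file name and output path are hypothetical):
#   python convert_conditional_detr.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50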
| 344 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {'vocab_file': 'spiece.model'}
UpperCamelCase__ : Optional[Any] = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
UpperCamelCase__ : List[str] = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Tuple = 2
UpperCamelCase__ : List[str] = 3
UpperCamelCase__ : Tuple = 4
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<sep>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<cls>" , _lowerCamelCase="<mask>" , _lowerCamelCase=["<eop>", "<eod>"] , _lowerCamelCase = None , **_lowerCamelCase , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
A_ : Dict = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
A_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A_ : Tuple = 3
A_ : int = do_lower_case
A_ : Optional[int] = remove_space
A_ : int = keep_accents
A_ : Optional[int] = vocab_file
A_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@property
def UpperCAmelCase_ ( self ) -> Dict:
return len(self.sp_model )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
A_ : Optional[int] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self , _lowerCamelCase ) -> Optional[Any]:
A_ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ : List[str] = {}
A_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
if self.remove_space:
A_ : Union[str, Any] = """ """.join(inputs.strip().split() )
else:
A_ : Dict = inputs
A_ : List[Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A_ : Dict = unicodedata.normalize("""NFKD""" , _lowerCamelCase )
A_ : List[str] = """""".join([c for c in outputs if not unicodedata.combining(_lowerCamelCase )] )
if self.do_lower_case:
A_ : List[str] = outputs.lower()
return outputs
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
A_ : Union[str, Any] = self.preprocess_text(_lowerCamelCase )
A_ : Union[str, Any] = self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
A_ : Dict = []
for piece in pieces:
if len(_lowerCamelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A_ : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCamelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A_ : int = cur_pieces[1:]
else:
A_ : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCamelCase )
else:
new_pieces.append(_lowerCamelCase )
return new_pieces
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Union[str, Any]:
return self.sp_model.PieceToId(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Optional[Any]:
return self.sp_model.IdToPiece(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Union[str, Any]:
A_ : Union[str, Any] = """""".join(_lowerCamelCase ).replace(_lowerCamelCase , """ """ ).strip()
return out_string
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ) -> str:
A_ : Any = kwargs.pop("""use_source_tokenizer""" , _lowerCamelCase )
A_ : Optional[Any] = self.convert_ids_to_tokens(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : Tuple = []
A_ : Union[str, Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
A_ : Any = []
sub_texts.append(_lowerCamelCase )
else:
current_sub_text.append(_lowerCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A_ : Optional[int] = """""".join(_lowerCamelCase )
A_ : Any = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Union[str, Any] = self.clean_up_tokenization(_lowerCamelCase )
return clean_text
else:
return text
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]:
A_ : List[Any] = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1, 1]
return ([0] * len(_lowerCamelCase )) + [1, 1]
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]:
A_ : int = [self.sep_token_id]
A_ : List[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A_ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
A_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
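# A minimal usage sketch (not part of the original module). It assumes network
# access to the "xlnet-base-cased" checkpoint listed in the vocab map above and
# that this class is exported under its public name XLNetTokenizer:
#
#     from transformers import XLNetTokenizer
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     ids = tokenizer("Hello world")["input_ids"]
#     # per build_inputs_with_special_tokens above, ids end with <sep>, <cls>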
| 344 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = (UnCLIPScheduler,)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]:
A_ : Union[str, Any] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
A_ : Dict = scheduler_class(**_lowerCamelCase )
A_ : Dict = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Any = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[Any] = pred_prev_sample
A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(25 )
A_ : List[str] = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
if i + 1 == timesteps.shape[0]:
A_ : List[str] = None
else:
A_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Optional[Any] = pred_prev_sample
A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
| 344 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
for attribute in key.split(""".""" ):
A_ : List[str] = getattr(a_ , a_ )
if weight_type is not None:
A_ : int = getattr(a_ , a_ ).shape
else:
A_ : str = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
A_ : Tuple = value
elif weight_type == "weight_g":
A_ : Dict = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : str = value
else:
A_ : Optional[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCAmelCase ( a_ , a_ , a_ ) -> int:
"""simple docstring"""
A_ : Union[str, Any] = []
A_ : int = fairseq_model.state_dict()
A_ : Optional[int] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ : Any = False
if "conv_layers" in name:
load_conv_layer(
a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == """group""" , )
A_ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
A_ : Dict = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
A_ : Dict = True
if "*" in mapped_key:
A_ : Optional[int] = name.split(a_ )[0].split(""".""" )[-2]
A_ : int = mapped_key.replace("""*""" , a_ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : List[str] = """weight_v"""
elif "weight" in name:
A_ : int = """weight"""
elif "bias" in name:
A_ : Union[str, Any] = """bias"""
else:
A_ : Any = None
set_recursively(a_ , a_ , a_ , a_ , a_ )
continue
if not is_used:
unused_weights.append(a_ )
logger.warning(F"Unused weights: {unused_weights}" )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> Tuple:
"""simple docstring"""
A_ : str = full_name.split("""conv_layers.""" )[-1]
A_ : Tuple = name.split(""".""" )
A_ : Tuple = int(items[0] )
A_ : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
A_ : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
A_ : Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
A_ : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
A_ : str = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a_ )
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ , a_=None , a_=None , a_=True ) -> Tuple:
"""simple docstring"""
if config_path is not None:
A_ : Tuple = HubertConfig.from_pretrained(a_ )
else:
A_ : List[str] = HubertConfig()
if is_finetuned:
if dict_path:
A_ : Dict = Dictionary.load(a_ )
            # important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
A_ : Optional[Any] = target_dict.pad_index
A_ : List[Any] = target_dict.bos_index
A_ : Any = target_dict.eos_index
A_ : Optional[Any] = len(target_dict.symbols )
A_ : Any = os.path.join(a_ , """vocab.json""" )
if not os.path.isdir(a_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(a_ ) )
return
os.makedirs(a_ , exist_ok=a_ )
with open(a_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , a_ )
A_ : Optional[Any] = WavaVecaCTCTokenizer(
a_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=a_ , )
A_ : int = True if config.feat_extract_norm == """layer""" else False
A_ : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=a_ , return_attention_mask=a_ , )
A_ : int = WavaVecaProcessor(feature_extractor=a_ , tokenizer=a_ )
processor.save_pretrained(a_ )
A_ : int = HubertForCTC(a_ )
else:
A_ : Tuple = HubertModel(a_ )
if is_finetuned:
A_ , A_ , A_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
A_ , A_ , A_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A_ : Optional[Any] = model[0].eval()
recursively_load_weights(a_ , a_ , a_ )
hf_wavavec.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
UpperCamelCase__ : int = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
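# Example invocation, assuming this script is saved as convert_hubert.py
# (file name and paths are hypothetical):
#   python convert_hubert.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned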
| 344 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=False , ) -> Optional[int]:
A_ : Union[str, Any] = size if size is not None else {"""height""": 20, """width""": 20}
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ : Optional[Any] = parent
A_ : Optional[int] = batch_size
A_ : Union[str, Any] = num_channels
A_ : str = image_size
A_ : Tuple = min_resolution
A_ : Dict = max_resolution
A_ : str = do_resize
A_ : Tuple = size
A_ : int = do_center_crop
A_ : Dict = crop_size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : Optional[Any] = image_std
A_ : Any = do_reduce_labels
def UpperCAmelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
A_ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(dataset[0]["""file"""] )
A_ : Dict = Image.open(dataset[1]["""file"""] )
return image, map
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
A_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(ds[0]["""file"""] )
A_ : List[Any] = Image.open(ds[1]["""file"""] )
A_ : Any = Image.open(ds[2]["""file"""] )
A_ : str = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BeitImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : List[Any] = BeitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
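# A minimal usage sketch outside the test harness, with the same illustrative
# sizes the tester uses (assumes Pillow is installed):
#
#     from PIL import Image
#     from transformers import BeitImageProcessor
#
#     processor = BeitImageProcessor(size={"height": 20, "width": 20},
#                                    crop_size={"height": 18, "width": 18})
#     image = Image.new("RGB", (32, 32))
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]) after resize + crop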
| 344 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> str:
super().__init__()
A_ : Optional[Any] = pad_token_id
A_ : List[Any] = max_length
A_ : str = vocab
A_ : Union[str, Any] = merges
A_ : List[Any] = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> int:
A_ : Tuple = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
A_ : Dict = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> str:
A_ : Tuple = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> List[Any]:
return cls(**_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Any:
A_ : List[Any] = self.tf_tokenizer(_lowerCamelCase )
A_ : Any = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
A_ : List[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
A_ , A_ : Tuple = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 1 |
'''simple docstring'''
def UpperCAmelCase ( a_ , a_ , a_ ) -> float:
"""simple docstring"""
A_ : Optional[Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
print(sum_of_series(1 , 1 , 1_0 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
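# Worked example (hand-checked, not part of the original file): the closed form
# is S = n/2 * (2a + (n - 1)d), so for a = 1, d = 1, n = 10 the call above
# prints (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0.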
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
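# Both lazy __init__ modules above defer the heavy submodule imports: attribute
# access on the _LazyModule proxy triggers the real import. A short sketch of
# the effect (package path shown for illustration):
#
#     import transformers.models.yolos as yolos
#     model_cls = yolos.YolosModel   # only now is the modeling submodule imported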
| 344 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ) -> Tuple:
A_ : str = parent
A_ : Tuple = batch_size
A_ : int = seq_length
A_ : List[Any] = is_training
A_ : Dict = use_input_mask
A_ : Any = use_token_type_ids
A_ : List[str] = use_labels
A_ : Dict = vocab_size
A_ : Optional[int] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : str = num_attention_heads
A_ : Tuple = intermediate_size
A_ : Dict = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : List[Any] = max_position_embeddings
A_ : Union[str, Any] = type_vocab_size
A_ : Any = type_sequence_label_size
A_ : List[str] = initializer_range
A_ : List[Any] = num_labels
A_ : Union[str, Any] = num_choices
A_ : str = scope
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Union[str, Any] = None
if self.use_input_mask:
A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Optional[int] = None
A_ : Dict = None
A_ : List[Any] = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A_ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ) -> List[str]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
A_ : Dict = DistilBertModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : Optional[Any] = DistilBertForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[Any] = DistilBertForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[Any] = self.num_labels
A_ : Any = DistilBertForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
A_ : Dict = self.num_labels
A_ : Optional[int] = DistilBertForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.num_choices
A_ : List[str] = DistilBertForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : Tuple = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self ) -> int:
A_ : Union[str, Any] = self.prepare_config_and_inputs()
((A_) , (A_) , (A_) , (A_) , (A_) , (A_)) : Dict = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = True
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Optional[Any] = DistilBertModelTester(self )
A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , dim=37 )
def UpperCAmelCase_ ( self ) -> int:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = DistilBertModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
A_ : str = True
A_ : Optional[int] = model_class(config=_lowerCamelCase )
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = torch.jit.trace(
_lowerCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCamelCase , os.path.join(_lowerCamelCase , """traced_model.pt""" ) )
A_ : Union[str, Any] = torch.jit.load(os.path.join(_lowerCamelCase , """traced_model.pt""" ) , map_location=_lowerCamelCase )
loaded(inputs_dict["""input_ids"""].to(_lowerCamelCase ) , inputs_dict["""attention_mask"""].to(_lowerCamelCase ) )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ) -> int:
A_ : Union[str, Any] = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
A_ : Optional[int] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A_ : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A_ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
A_ : Optional[int] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : Tuple = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 ) )
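# The integration test above doubles as a usage recipe; a trimmed sketch with
# illustrative token ids (101/102 are the [CLS]/[SEP] ids of the uncased vocab):
#
#     import torch
#     from transformers import DistilBertModel
#
#     model = DistilBertModel.from_pretrained("distilbert-base-uncased")
#     ids = torch.tensor([[101, 7592, 102]])
#     hidden = model(ids).last_hidden_state   # shape (1, 3, 768)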
| 344 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Optional[Any] = name
A_ : Dict = value
A_ : Union[str, Any] = weight
def __repr__( self ) -> List[str]:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.value
def UpperCAmelCase_ ( self ) -> List[str]:
return self.name
def UpperCAmelCase_ ( self ) -> Tuple:
return self.weight
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.value / self.weight
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Optional[int] = []
for i in range(len(a_ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]:
"""simple docstring"""
A_ : Optional[Any] = sorted(a_ , key=a_ , reverse=a_ )
A_ : str = []
A_ , A_ : Dict = 0.0, 0.0
for i in range(len(a_ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
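# A usage sketch of the greedy selection above, assuming the original public
# names Things / build_menu / greedy (menu values are illustrative):
#
#     foods, values, weights = ["Burger", "Pizza", "Coke"], [80, 100, 60], [40, 60, 20]
#     menu = build_menu(foods, values, weights)
#     taken, total = greedy(menu, 100, Things.get_value)
#     print(total)   # 180: Pizza (w=60) then Burger (w=40) fit under the 100 cap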
| 344 | 1 |
'''simple docstring'''
def UpperCAmelCase ( a_ , a_ ) -> int:
"""simple docstring"""
while a != 0:
A_ , A_ : Union[str, Any] = b % a, a
return b
def UpperCAmelCase ( a_ , a_ ) -> int:
"""simple docstring"""
if gcd(a_ , a_ ) != 1:
A_ : Any = F"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(a_ )
A_ , A_ , A_ : str = 1, 0, a
A_ , A_ , A_ : Tuple = 0, 1, m
while va != 0:
A_ : str = ua // va
A_ , A_ , A_ , A_ , A_ , A_ : Optional[Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
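# Worked example (hand-checked): for a = 3, m = 11 the extended-Euclid loop
# returns 4, since 3 * 4 = 12 ≡ 1 (mod 11). Assuming the inverse function keeps
# its original public name mod_inverse:
#
#     print(mod_inverse(3, 11))   # 4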
| 344 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase ( a_ , a_ ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
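    # Illustrative call (a minimal sketch; the component values are made up):
    # L = 10 mH and C = 100 nF resonate at 1 / (2 * pi * sqrt(L * C)) ~ 5033 Hz.
    label, frequency = resonant_frequency(inductance=10e-3, capacitance=100e-9)
    print(f"{label}: {frequency:.1f} Hz")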
| 344 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ) -> None:
    """A second lock on the same file must time out while the first one is held."""
    lock1 = FileLock(str(tmpdir / """foo.lock""" ) )
    lock2 = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_filename( tmpdir ) -> None:
    """Over-long lock-file names are truncated to at most 255 characters."""
    filename = """a""" * 1_0_0_0 + """.lock"""
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith(""".lock""" )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 2_5_5
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
| 344 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object ):
    """simple docstring"""
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ) -> None:
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties( self ) -> None:
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["""vocab_size"""] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=F"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"`{name} value {idx} expected, but was {getattr(config , name )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"`{name} value {idx} expected, but was {getattr(config , name )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string( self ) -> None:
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )
    def create_and_test_config_to_json_file( self ) -> None:
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """config.json""" )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained( self ) -> None:
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained_subfolder( self ) -> None:
        config_first = self.config_class(**self.inputs_dict )
        subfolder = """test"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_with_num_labels( self ) -> None:
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )
    def check_config_can_be_init_without_params( self ) -> None:
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )
    def check_config_arguments_init( self ) -> None:
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
    def run_common_tests( self ) -> None:
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
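# Illustrative wiring inside a model's config test (a minimal sketch; the
# `BertConfig` class and test names here are examples, not part of this file):
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#         def test_config(self):
#             self.config_tester.run_common_tests()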
| 344 | 1 |
'''simple docstring'''
import operator as op
UpperCamelCase__ : Union[str, Any] = 'scaler.pt'
UpperCamelCase__ : Any = 'pytorch_model'
UpperCamelCase__ : Any = 'random_states'
UpperCamelCase__ : Tuple = 'optimizer'
UpperCamelCase__ : Any = 'scheduler'
UpperCamelCase__ : Any = 'pytorch_model.bin'
UpperCamelCase__ : Dict = 'pytorch_model.bin.index.json'
UpperCamelCase__ : Optional[Any] = 'model.safetensors'
UpperCamelCase__ : str = 'model.safetensors.index.json'
UpperCamelCase__ : Any = '1.10.2'
UpperCamelCase__ : str = 'py38'
UpperCamelCase__ : List[Any] = '4.17.0'
UpperCamelCase__ : str = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
UpperCamelCase__ : str = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
UpperCamelCase__ : str = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
UpperCamelCase__ : str = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
UpperCamelCase__ : List[str] = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
UpperCamelCase__ : Optional[Any] = '2.0.1'
UpperCamelCase__ : List[Any] = ['pdsh', 'standard', 'openmpi', 'mvapich']
UpperCamelCase__ : List[Any] = ['default', 'reduce-overhead', 'max-autotune']
UpperCamelCase__ : Union[str, Any] = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase__ : Any = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
UpperCamelCase__ : Optional[Any] = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
UpperCamelCase__ : Optional[Any] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
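# Illustrative lookup in the operator table defined above (a minimal sketch;
# the name `STR_OPERATION_TO_FUNC` is an assumption for the mapping of
# comparison strings to `operator` functions):
#     STR_OPERATION_TO_FUNC = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
#     assert STR_OPERATION_TO_FUNC['>=']((2, 0, 1), (1, 10, 2))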
| 344 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ):
    """Load a Flax checkpoint file into an existing PyTorch model."""
    try:
        with open(model_file , """rb""" ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith("""version""" ):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""" )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    pt_model.base_model_prefix = """"""
    flax_state_dict = flatten_dict(flax_state , sep=""".""" )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""" )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("""_0""" , """.0""" )
                    .replace("""_1""" , """.1""" )
                    .replace("""_2""" , """.2""" )
                    .replace("""_3""" , """.3""" )
                    .replace("""_4""" , """.4""" )
                    .replace("""_5""" , """.5""" )
                    .replace("""_6""" , """.6""" )
                    .replace("""_7""" , """.7""" )
                    .replace("""_8""" , """.8""" )
                    .replace("""_9""" , """.9""" )
                )
        flax_key = """.""".join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
        logger.warning(
            F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            """ use it for predictions and inference.""" )
    return pt_model
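# Illustrative usage (a minimal sketch; the model instance and checkpoint
# path are assumptions, not fixed by this module):
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")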
| 344 | 1 |
'''simple docstring'''
class FlowNetwork :
    """simple docstring"""
    def __init__( self , graph , sources , sinks ) -> None:
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources , sinks )
        self.vertices_count = len(graph )
        self.maximum_flow_algorithm = None
    def _normalize_graph( self , sources , sinks ) -> None:
        # accept a single vertex index as well as a list of indices
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self ) -> int:
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""" )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm( self , algorithm ) -> None:
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor :
    """simple docstring"""
    def __init__( self , flow_network ) -> None:
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute( self ) -> None:
        if not self.executed:
            self._algorithm()
            self.executed = True
    def _algorithm( self ) -> None:
        pass
class MaximumFlowAlgorithmExecutor ( FlowNetworkAlgorithmExecutor ):
    """simple docstring"""
    def __init__( self , flow_network ) -> None:
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow( self ) -> int:
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""" )
        return self.maximum_flow
class PushRelabelExecutor ( MaximumFlowAlgorithmExecutor ):
    """simple docstring"""
    def __init__( self , flow_network ) -> None:
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm( self ) -> None:
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self , vertex_index ) -> None:
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )
    def push( self , from_index , to_index ) -> None:
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel( self , vertex_index ) -> None:
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f'maximum flow is {maximum_flow}')
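    # Sanity check (a minimal sketch): the only path 0 -> 1 -> 2 -> 3 has
    # capacities 7, 6, 8, so the bottleneck (and maximum flow) is 6.
    assert maximum_flow == 6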
| 344 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Dict = TFRegNetModel(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Optional[Any] = self.num_labels
A_ : int = TFRegNetForImageClassification(_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = TFRegNetModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Dict = layer_type
A_ : List[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TFRegNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" )
# forward pass
A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase )
# verify the logits
A_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
| 344 | 1 |
'''simple docstring'''
import math
class Graph :
    """simple docstring"""
    def __init__( self , n=0 ) -> None:  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ) -> None:
        self.dp[u][v] = w
    def floyd_warshall( self ) -> None:
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
graph.show_min(0, 3)
| 344 |
'''simple docstring'''
def solution( n = 1_0_0 ) -> int:
    """Returns the difference between the square of the sum and the sum of the squares of the first ``n`` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
    print(f'{solution() = }')
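    # For n = 10: (1 + ... + 10)^2 - (1^2 + ... + 10^2) = 3025 - 385 = 2640.
    assert solution(10) == 2640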
| 344 | 1 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    """Load an OmegaConf config, optionally pretty-printing it."""
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    """Build a VQModel from a config and load its checkpoint onto ``device``."""
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    model.load_state_dict(sd , strict=False )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    """Encode ``x`` to the VQGAN latent space and decode it back."""
    z , emb_loss , info = model.encode(x )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    """Resolve a dotted import path like ``pkg.module.Class`` to the object itself."""
    module , cls = string.rsplit(""".""" , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
    """Instantiate ``config.target`` with ``config.params``."""
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""" )
    return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    """Instantiate a model from config and optionally load a state dict."""
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    """Load the specified checkpoint (if any) and build the model from config."""
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="""cpu""" )
        global_step = pl_sd["""global_step"""]
        print(F"loaded model from global step {global_step}." )
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=gpu , eval_mode=eval_mode )["""model"""]
    return model, global_step
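# Illustrative pipeline (a minimal sketch; device handling and the checkpoint
# paths are assumptions):
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     vqgan = load_vqgan(device)
#     xrec = reconstruct_with_vqgan(images.to(device), vqgan)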
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCamelCase__ : int = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
def bubble_sort( list_data , length = 0 ) -> list:
    """Recursive bubble sort: each pass bubbles the largest remaining element to the end."""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
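    # Illustrative run (a minimal sketch):
    print(bubble_sort([0, 5, 2, 3, 2]))  # -> [0, 2, 2, 3, 5]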
| 344 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def UpperCAmelCase ( a_ ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
A_ : List[str] = []
A_ : Dict = []
A_ : List[Any] = []
for rt in rc.restypes:
A_ : Tuple = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
A_ : Union[str, Any] = {name: i for i, name in enumerate(a_ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 1_4 )
restype_atomaa_to_atomaa_list.append([0] * 3_7 )
restype_atomaa_mask_list.append([0.0] * 1_4 )
A_ : Tuple = torch.tensor(
a_ , dtype=torch.intaa , device=protein["""aatype"""].device , )
A_ : Optional[int] = torch.tensor(
a_ , dtype=torch.intaa , device=protein["""aatype"""].device , )
A_ : List[Any] = torch.tensor(
a_ , dtype=torch.floataa , device=protein["""aatype"""].device , )
A_ : Optional[int] = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
A_ : Dict = restype_atomaa_to_atomaa[protein_aatype]
A_ : Optional[Any] = restype_atomaa_mask[protein_aatype]
A_ : Any = residx_atomaa_mask
A_ : List[str] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
A_ : Tuple = restype_atomaa_to_atomaa[protein_aatype]
A_ : Tuple = residx_atomaa_to_atomaa.long()
# create the corresponding mask
A_ : Optional[Any] = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
A_ : Optional[Any] = rc.restype_atoa[restype_letter]
A_ : Any = rc.residue_atoms[restype_name]
for atom_name in atom_names:
A_ : Any = rc.atom_order[atom_name]
A_ : Optional[int] = 1
A_ : Optional[int] = restype_atomaa_mask[protein_aatype]
A_ : Dict = residx_atomaa_mask
return protein
def UpperCAmelCase ( a_ ) -> Dict[str, np.ndarray]:
"""simple docstring"""
A_ : Union[str, Any] = tree_map(lambda a_ : torch.tensor(a_ , device=batch["""aatype"""].device ) , a_ , np.ndarray )
A_ : Optional[int] = tensor_tree_map(lambda a_ : np.array(a_ ) , make_atomaa_masks(a_ ) )
return out
| 344 | 1 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList ( HashTable ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )
    def _set_value( self , key , data ) -> None:
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self ) -> float:
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self , key , data=None ):
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
| 344 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ) -> str:
A_ : Optional[int] = parent
A_ : Dict = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = patch_size
A_ : List[str] = num_channels
A_ : List[Any] = is_training
A_ : Union[str, Any] = use_labels
A_ : Union[str, Any] = hidden_size
A_ : str = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Any = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Dict = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : str = scope
A_ : Optional[Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
A_ : Tuple = (image_size // patch_size) ** 2
A_ : Union[str, Any] = num_patches + 2
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> int:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[str] = DeiTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : int = DeiTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Dict = 1
A_ : Optional[int] = DeiTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.type_sequence_label_size
A_ : Tuple = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Dict = 1
A_ : Any = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = DeiTModelTester(self )
A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]:
A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : List[str] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : Any = False
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A_ : List[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Union[str, Any] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
A_ : Dict = problem_type["""title"""]
A_ : List[Any] = problem_type["""num_labels"""]
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
A_ : List[str] = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
A_ : Optional[int] = self.default_image_processor
A_ : str = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Any = model(**_lowerCamelCase )
# verify the logits
A_ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : List[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
A_ : Union[str, Any] = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : List[Any] = model(_lowerCamelCase )
| 344 | 1 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
UpperCamelCase__ : List[Any] = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check( pkg , hint=None ):
    """simple docstring"""
    require_version(deps[pkg] , hint )
| 344 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , do_resize = True , size = None , size_divisor = 32 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , do_center_crop = True , image_mean = [0.4814_5466, 0.457_8275, 0.4082_1073] , image_std = [0.2686_2954, 0.2613_0258, 0.2757_7711] , do_pad = True , batch_size=7 , min_resolution=30 , max_resolution=400 , num_channels=3 , ) -> None:
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"""shortest_edge""": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict( self ) -> dict:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            size = self.size["""shortest_edge"""]
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size
            max_size = int((1333 / 800) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            newh , neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height , expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """simple docstring"""
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image processor
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image processor
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
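# A minimal standalone sketch of the size arithmetic that get_expected_values
# above exercises (hypothetical helper name; shorter=800 and the 1333/800 cap
# mirror the tester defaults, not a verified public API):
def bridgetower_expected_size(h, w, shorter=800, size_divisor=32):
    scale = shorter / min(h, w)
    newh, neww = (shorter, scale * w) if h < w else (scale * h, shorter)
    max_size = int((1333 / 800) * shorter)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # snap both sides down to a multiple of size_divisor
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor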
| 344 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = (DDPMParallelScheduler,)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> Optional[Any]:
A_ : Dict = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self ) -> Any:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
self.check_over_configs(thresholding=_lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
def UpperCAmelCase_ ( self ) -> List[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Any = self.scheduler_classes[0]
A_ : List[Any] = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Tuple = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Union[str, Any] = scheduler_class(**_lowerCamelCase )
A_ : Optional[int] = len(_lowerCamelCase )
A_ : Dict = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = self.dummy_sample_deter + 0.1
A_ : Optional[Any] = self.dummy_sample_deter - 0.1
A_ : List[Any] = samplea.shape[0]
A_ : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
A_ : Optional[Any] = torch.arange(_lowerCamelCase )[0:3, None].repeat(1 , _lowerCamelCase )
A_ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
A_ : str = scheduler.batch_step_no_noise(_lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
A_ : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def UpperCAmelCase_ ( self ) -> str:
A_ : int = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Optional[int] = scheduler_class(**_lowerCamelCase )
A_ : Union[str, Any] = len(_lowerCamelCase )
A_ : Dict = self.dummy_model()
A_ : Any = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Union[str, Any] = pred_prev_sample
A_ : int = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : List[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
A_ : int = scheduler_class(**_lowerCamelCase )
A_ : Dict = len(_lowerCamelCase )
A_ : Union[str, Any] = self.dummy_model()
A_ : List[str] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
A_ : str = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : Tuple = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[str] = pred_prev_sample
A_ : str = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
A_ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCamelCase )
A_ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_lowerCamelCase ):
if i == len(_lowerCamelCase ) - 1:
A_ : int = -1
else:
A_ : Dict = timesteps[i + 1]
A_ : Union[str, Any] = scheduler.previous_timestep(_lowerCamelCase )
A_ : str = prev_t.item()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : List[Any] = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
A_ : List[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCamelCase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : str = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = [100, 87, 50, 1, 0]
A_ : List[str] = len(_lowerCamelCase )
with self.assertRaises(_lowerCamelCase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : str = self.scheduler_classes[0]
A_ : int = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _lowerCamelCase , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
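# A minimal usage sketch of the custom-timesteps behaviour asserted above
# (assuming the diffusers DDPMParallelScheduler imported at the top of this
# file): timesteps must be strictly descending and are mutually exclusive
# with num_inference_steps.
scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
print(scheduler.timesteps)  # tensor([100,  87,  50,   1,   0])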
| 344 |
'''simple docstring'''
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(a_ ):
for j in range(a_ ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) , end="""\t""" )
else:
print("""INF""" , end="""\t""" )
print()
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
A_ : List[str] = [[float("""inf""" ) for _ in range(a_ )] for _ in range(a_ )]
for i in range(a_ ):
for j in range(a_ ):
A_ : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(a_ ):
# looping through rows of graph array
for i in range(a_ ):
# looping through columns of graph array
for j in range(a_ ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
A_ : List[str] = dist[i][k] + dist[k][j]
_print_dist(a_ , a_ )
return dist, v
if __name__ == "__main__":
UpperCamelCase__ : Tuple = int(input('Enter number of vertices: '))
UpperCamelCase__ : int = int(input('Enter number of edges: '))
UpperCamelCase__ : Dict = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
UpperCamelCase__ : Union[str, Any] = 0.0
# src and dst are 0-based vertex indices that must be less than v (graph is v x v)
# entering an out-of-range index will raise an IndexError
for i in range(e):
print('\nEdge ', i + 1)
UpperCamelCase__ : Union[str, Any] = int(input('Enter source:'))
UpperCamelCase__ : int = int(input('Enter destination:'))
UpperCamelCase__ : Optional[Any] = float(input('Enter weight:'))
UpperCamelCase__ : Any = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs
# 0 INF INF
# INF 0 2
# INF 1 0
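# A minimal non-interactive sketch of the same run (assuming the
# de-obfuscated name `floyd_warshall` used at the call site above):
# INF = float('inf')
# graph = [[0.0, INF, INF],
#          [INF, 0.0, 2.0],
#          [INF, 1.0, 0.0]]
# dist, _ = floyd_warshall(graph, 3)
# dist == [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]  # matches the output above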
| 344 | 1 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> float | int:
"""simple docstring"""
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ : Optional[int] = cst_fwd.get(a_ , np.inf )
A_ : Dict = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ : str = new_cost_f
A_ : List[Any] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ : List[str] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int:
"""simple docstring"""
A_ : Tuple = -1
A_ : Union[str, Any] = set()
A_ : Tuple = set()
A_ : List[str] = {source: 0}
A_ : Union[str, Any] = {destination: 0}
A_ : Optional[Any] = {source: None}
A_ : Any = {destination: None}
A_ : PriorityQueue[Any] = PriorityQueue()
A_ : PriorityQueue[Any] = PriorityQueue()
A_ : Any = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ : Dict = queue_forward.get()
visited_forward.add(a_ )
A_ , A_ : Optional[Any] = queue_backward.get()
visited_backward.add(a_ )
A_ : int = pass_and_relaxation(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , )
A_ : Any = pass_and_relaxation(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ : Optional[int] = shortest_distance
return shortest_path_distance
UpperCamelCase__ : int = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
UpperCamelCase__ : Optional[int] = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
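# A minimal usage sketch (assuming the search routine above is bound to the
# name `bidirectional_dij`, and the two adjacency dicts to `graph_fwd` and
# `graph_bwd`; all three names are assumptions):
# bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3, via E -> G -> F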
| 344 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : List[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
| 344 | 1 |
'''simple docstring'''
import pytest
UpperCamelCase__ : Optional[int] = '__dummy_dataset1__'
UpperCamelCase__ : List[Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCAmelCase ( ) -> str:
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
A_ : Union[str, Any] = dataset_loading_script_name
A_ : Dict = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=a_ )
A_ : str = script_dir / F"{script_name}.py"
with open(a_ , """w""" ) as f:
f.write(a_ )
return str(a_ )
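# A minimal sketch of how the fixture above is typically consumed
# (hypothetical test; loading executes the generated script through the
# `datasets` library and downloads the JSONL files it references):
def test_dummy_dataset_script_loads(dataset_loading_script_dir):
    from datasets import load_dataset

    dataset = load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in dataset.column_names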
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
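# A minimal sketch of what the _LazyModule indirection above buys (assuming
# this file is the package's __init__.py): submodules are imported only on
# first attribute access, so an access like the one below is what finally
# triggers the real submodule import.
# from transformers.models.data2vec import Data2VecTextConfig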
| 344 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _lowerCAmelCase :
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Any:
return self.get_dummy_input()
@property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def UpperCAmelCase_ ( self , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , ) -> str:
A_ : Union[str, Any] = 4
A_ : str = 32
A_ : Optional[Any] = (32, 32)
A_ : str = torch.manual_seed(0 )
A_ : Dict = torch.device(_lowerCamelCase )
A_ : Optional[Any] = (batch_size, num_channels) + sizes
A_ : Dict = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase )
A_ : Any = {"""hidden_states""": hidden_states}
if include_temb:
A_ : str = 128
A_ : Tuple = randn_tensor((batch_size, temb_channels) , generator=_lowerCamelCase , device=_lowerCamelCase )
if include_res_hidden_states_tuple:
A_ : int = torch.manual_seed(1 )
A_ : Tuple = (randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase ),)
if include_encoder_hidden_states:
A_ : Union[str, Any] = floats_tensor((batch_size, 32, 32) ).to(_lowerCamelCase )
if include_skip_sample:
A_ : List[Any] = randn_tensor(((batch_size, 3) + sizes) , generator=_lowerCamelCase , device=_lowerCamelCase )
return dummy_input
def UpperCAmelCase_ ( self ) -> Any:
A_ : Union[str, Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
A_ : List[Any] = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
A_ : Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Any:
A_ , A_ : List[Any] = self.prepare_init_args_and_inputs_for_common()
A_ : str = self.block_class(**_lowerCamelCase )
unet_block.to(_lowerCamelCase )
unet_block.eval()
with torch.no_grad():
A_ : List[str] = unet_block(**_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : int = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ : Dict = output[0, -1, -3:, -3:]
A_ : Dict = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
assert torch_all_close(output_slice.flatten() , _lowerCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def UpperCAmelCase_ ( self ) -> Dict:
A_ , A_ : int = self.prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = self.block_class(**_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : Optional[Any] = model(**_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : Optional[int] = output[0]
A_ : Optional[Any] = torch.device(_lowerCamelCase )
A_ : Tuple = randn_tensor(output.shape , device=_lowerCamelCase )
A_ : List[Any] = torch.nn.functional.mse_loss(_lowerCamelCase , _lowerCamelCase )
loss.backward()
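# A minimal sketch of how this mixin is typically specialized (hypothetical
# subclass; assumes the mixin above corresponds to diffusers'
# UNetBlockTesterMixin and that DownBlock2D is importable):
# from diffusers.models.unet_2d_blocks import DownBlock2D
#
# class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#     block_class = DownBlock2D
#     block_type = "down"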
| 344 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''vision-encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> str:
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"A configuraton of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
A_ : Optional[int] = kwargs.pop("""encoder""" )
A_ : List[str] = encoder_config.pop("""model_type""" )
A_ : str = kwargs.pop("""decoder""" )
A_ : Optional[Any] = decoder_config.pop("""model_type""" )
A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : str = self.__class__.model_type
return output
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
A_ : Optional[int] = OrderedDict()
A_ : List[Any] = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
A_ , A_ : str = dummy_input["""input_ids"""].shape
A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
A_ : Union[str, Any] = dummy_input.pop("""input_ids""" )
A_ : List[str] = dummy_input.pop("""attention_mask""" )
A_ : Optional[int] = torch.zeros(_lowerCamelCase )
return common_inputs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
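# A minimal composition sketch (assuming the classmethod above is the public
# transformers API VisionEncoderDecoderConfig.from_encoder_decoder_configs):
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

composed = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
assert composed.decoder.is_decoder and composed.decoder.add_cross_attention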
| 344 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : str = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''deformable_detr'''
lowerCamelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.25 , _lowerCamelCase=False , **_lowerCamelCase , ) -> Tuple:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A_ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : List[str] = backbone_config.get("""model_type""" )
A_ : Tuple = CONFIG_MAPPING[backbone_model_type]
A_ : Tuple = config_class.from_dict(_lowerCamelCase )
A_ : int = use_timm_backbone
A_ : str = backbone_config
A_ : List[str] = num_channels
A_ : Dict = num_queries
A_ : Optional[Any] = max_position_embeddings
A_ : List[Any] = d_model
A_ : Any = encoder_ffn_dim
A_ : Optional[int] = encoder_layers
A_ : int = encoder_attention_heads
A_ : Dict = decoder_ffn_dim
A_ : List[str] = decoder_layers
A_ : int = decoder_attention_heads
A_ : Optional[int] = dropout
A_ : List[str] = attention_dropout
A_ : int = activation_dropout
A_ : Optional[int] = activation_function
A_ : str = init_std
A_ : Any = init_xavier_std
A_ : List[str] = encoder_layerdrop
A_ : Any = auxiliary_loss
A_ : Optional[int] = position_embedding_type
A_ : List[Any] = backbone
A_ : Optional[int] = use_pretrained_backbone
A_ : Optional[int] = dilation
# deformable attributes
A_ : int = num_feature_levels
A_ : Optional[int] = encoder_n_points
A_ : int = decoder_n_points
A_ : Union[str, Any] = two_stage
A_ : List[Any] = two_stage_num_proposals
A_ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
A_ : Optional[int] = class_cost
A_ : List[str] = bbox_cost
A_ : Tuple = giou_cost
# Loss coefficients
A_ : Optional[Any] = mask_loss_coefficient
A_ : Any = dice_loss_coefficient
A_ : str = bbox_loss_coefficient
A_ : int = giou_loss_coefficient
A_ : Tuple = eos_coefficient
A_ : str = focal_alpha
A_ : Optional[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def UpperCAmelCase_ ( self ) -> int:
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self ) -> int:
return self.d_model
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A_ : Optional[Any] = self.backbone_config.to_dict()
A_ : Optional[int] = self.__class__.model_type
return output
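# A minimal sketch of the two-stage constraint enforced in __init__ above
# (assuming transformers' public DeformableDetrConfig):
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(two_stage=True, with_box_refine=True)  # valid
# DeformableDetrConfig(two_stage=True, with_box_refine=False) raises ValueError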
| 344 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase__ : Any = '\\n Text data.\n Second line of data.'
UpperCamelCase__ : List[Any] = 'file'
@pytest.fixture(scope="""session""" )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : int = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
A_ : int = bytes(a_ , """utf-8""" )
with zstd.open(a_ , """wb""" ) as f:
f.write(a_ )
return path
@pytest.fixture
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , a_ ) , """w""" ) as f:
f.write(a_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : List[str] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A_ : Any = input_paths[compression_format]
A_ : Tuple = tmp_path / """cache"""
A_ : Tuple = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ )
A_ : Dict = cached_path(a_ , download_config=a_ )
with open(a_ ) as f:
A_ : Optional[Any] = f.read()
with open(a_ ) as f:
A_ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Union[str, Any] = """custom_cache"""
A_ : List[str] = """custom_extracted_dir"""
A_ : Optional[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a_ ) )
A_ : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ : List[Any] = xz_file
A_ : Optional[int] = (
DownloadConfig(extract_compressed_file=a_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ )
)
A_ : Union[str, Any] = cached_path(a_ , download_config=a_ )
assert Path(a_ ).parent.parts[-2:] == expected
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : str = str(Path(a_ ).resolve() )
assert cached_path(a_ ) == text_file
# relative path
A_ : List[str] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(a_ ) == text_file
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(a_ ):
cached_path(a_ )
# relative path
A_ : Tuple = """./__missing_file__.txt"""
with pytest.raises(a_ ):
cached_path(a_ )
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
A_ : Any = get_from_cache(F"tmp://{tmpfs_file}" )
with open(a_ ) as f:
A_ : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with pytest.raises(a_ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
http_get("""https://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
ftp_get("""ftp://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
fsspec_get("""s3://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
fsspec_head("""s3://huggingface.co""" )
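# A minimal sketch of the extract-on-cache path exercised above (assuming the
# datasets internals imported at the top of this module; the input path is
# hypothetical):
# config = DownloadConfig(cache_dir="/tmp/hf_cache", extract_compressed_file=True)
# extracted = cached_path("/data/corpus.txt.zst", download_config=config)
# open(extracted).read()  # decompressed text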
| 344 | 1 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase__ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
UpperCamelCase__ : Tuple = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
UpperCamelCase__ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="auto" , _lowerCamelCase=-1 , _lowerCamelCase=0.9 , _lowerCamelCase=5 , _lowerCamelCase=500 , _lowerCamelCase="gpt2-large" , _lowerCamelCase=-1 , _lowerCamelCase=1024 , _lowerCamelCase=25 , _lowerCamelCase=5 , _lowerCamelCase=True , _lowerCamelCase=25 , ) -> List[str]:
A_ : str = compute_mauve(
p_text=_lowerCamelCase , q_text=_lowerCamelCase , p_features=_lowerCamelCase , q_features=_lowerCamelCase , p_tokens=_lowerCamelCase , q_tokens=_lowerCamelCase , num_buckets=_lowerCamelCase , pca_max_data=_lowerCamelCase , kmeans_explained_var=_lowerCamelCase , kmeans_num_redo=_lowerCamelCase , kmeans_max_iter=_lowerCamelCase , featurize_model_name=_lowerCamelCase , device_id=_lowerCamelCase , max_text_length=_lowerCamelCase , divergence_curve_discretization_size=_lowerCamelCase , mauve_scaling_factor=_lowerCamelCase , verbose=_lowerCamelCase , seed=_lowerCamelCase , )
return out
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ : Tuple = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''distilbert'''
lowerCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=512 , _lowerCamelCase=False , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=768 , _lowerCamelCase=4 * 768 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2 , _lowerCamelCase=0 , **_lowerCamelCase , ) -> Optional[Any]:
A_ : Tuple = vocab_size
A_ : List[Any] = max_position_embeddings
A_ : int = sinusoidal_pos_embds
A_ : int = n_layers
A_ : str = n_heads
A_ : Optional[int] = dim
A_ : int = hidden_dim
A_ : Tuple = dropout
A_ : List[Any] = attention_dropout
A_ : int = activation
A_ : Dict = initializer_range
A_ : List[Any] = qa_dropout
A_ : int = seq_classif_dropout
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
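# A minimal sketch of the dynamic axes exposed above (assuming transformers'
# DistilBertOnnxConfig naming for the OnnxConfig subclass defined here):
from transformers import DistilBertConfig
from transformers.models.distilbert import DistilBertOnnxConfig

onnx_config = DistilBertOnnxConfig(DistilBertConfig(), task="sequence-classification")
print(onnx_config.inputs)  # input_ids / attention_mask with batch + sequence dynamic axes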
| 344 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''git_vision_model'''
def __init__( self , _lowerCamelCase=768 , _lowerCamelCase=3072 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3 , _lowerCamelCase=224 , _lowerCamelCase=16 , _lowerCamelCase="quick_gelu" , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , **_lowerCamelCase , ) -> Any:
super().__init__(**_lowerCamelCase )
A_ : List[str] = hidden_size
A_ : Tuple = intermediate_size
A_ : Tuple = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : int = num_channels
A_ : Optional[int] = patch_size
A_ : str = image_size
A_ : List[Any] = initializer_range
A_ : Union[str, Any] = attention_dropout
A_ : List[str] = layer_norm_eps
A_ : Dict = hidden_act
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowerCamelCase )
A_ , A_ : Union[str, Any] = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
A_ : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_lowerCamelCase , **_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''git'''
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=6 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1024 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase="absolute" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=101 , _lowerCamelCase=102 , _lowerCamelCase=None , **_lowerCamelCase , ) -> str:
super().__init__(bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , pad_token_id=_lowerCamelCase , **_lowerCamelCase )
if vision_config is None:
A_ : Optional[Any] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
A_ : List[Any] = GitVisionConfig(**_lowerCamelCase )
A_ : List[Any] = vocab_size
A_ : Tuple = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_dropout_prob
A_ : Optional[Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = max_position_embeddings
A_ : Optional[int] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[Any] = position_embedding_type
A_ : Optional[Any] = use_cache
A_ : Any = tie_word_embeddings
A_ : List[Any] = num_image_with_embedding
A_ : Optional[int] = bos_token_id
A_ : Tuple = eos_token_id
def UpperCAmelCase_ ( self ) -> int:
A_ : Optional[int] = copy.deepcopy(self.__dict__ )
A_ : Any = self.vision_config.to_dict()
A_ : Any = self.__class__.model_type
return output
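# A minimal sketch of the default-filling behaviour in __init__ above
# (assuming transformers' public GitConfig):
from transformers import GitConfig

git_config = GitConfig()  # vision_config is auto-populated with GitVisionConfig defaults
print(git_config.vision_config.image_size)  # 224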
| 344 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = state_dict.pop(a_ )
A_ : Tuple = val
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A_ : str = value
else:
A_ : int = value
return new_state_dict
def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]:
"""simple docstring"""
A_ : List[Any] = """"""
if is_panoptic:
A_ : Any = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[Any] = in_proj_weight[:2_5_6, :]
A_ : Tuple = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : int = in_proj_bias[2_5_6:5_1_2]
A_ : int = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : List[Any] = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
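        # "dc5" checkpoints use a dilated last ResNet stage (stride 16 instead of 32) for higher-resolution features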
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
    A_ : str = {int(k ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 344 | 1 |
'''simple docstring'''
import math
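# Introsort: quicksort with a median-of-3 pivot, falling back to heapsort once a
# recursion-depth budget of 2 * ceil(log2(n)) is exhausted, and finishing slices
# at or below the size threshold (16) with insertion sort.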
def UpperCAmelCase ( a_ , a_ = 0 , a_ = 0 ) -> list:
"""simple docstring"""
A_ : List[Any] = end or len(a_ )
for i in range(a_ , a_ ):
A_ : Optional[int] = i
A_ : Dict = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
A_ : List[Any] = array[temp_index - 1]
temp_index -= 1
A_ : Tuple = temp_index_value
return array
def UpperCAmelCase ( a_ , a_ , a_ ) -> None: # Max Heap
"""simple docstring"""
A_ : List[Any] = index
A_ : List[str] = 2 * index + 1 # Left Node
A_ : List[str] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
A_ : Optional[int] = left_index
if right_index < heap_size and array[largest] < array[right_index]:
A_ : Optional[int] = right_index
if largest != index:
A_ , A_ : List[str] = array[largest], array[index]
heapify(a_ , a_ , a_ )
def UpperCAmelCase ( a_ ) -> list:
"""simple docstring"""
A_ : List[str] = len(a_ )
for i in range(n // 2 , -1 , -1 ):
heapify(a_ , a_ , a_ )
for i in range(n - 1 , 0 , -1 ):
A_ , A_ : str = array[0], array[i]
heapify(a_ , 0 , a_ )
return array
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int:
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int:
"""simple docstring"""
A_ : str = low
A_ : List[Any] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
A_ , A_ : Dict = array[j], array[i]
i += 1
def UpperCAmelCase ( a_ ) -> list:
"""simple docstring"""
if len(a_ ) == 0:
return array
A_ : Dict = 2 * math.ceil(math.loga(len(a_ ) ) )
A_ : Tuple = 1_6
return intro_sort(a_ , 0 , len(a_ ) , a_ , a_ )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> list:
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(a_ )
max_depth -= 1
A_ : Union[str, Any] = median_of_a(a_ , a_ , start + ((end - start) // 2) + 1 , end - 1 )
A_ : Optional[int] = partition(a_ , a_ , a_ , a_ )
intro_sort(a_ , a_ , a_ , a_ , a_ )
A_ : Any = p
return insertion_sort(a_ , a_ , a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : Tuple = input('Enter numbers separated by a comma : ').strip()
UpperCamelCase__ : Optional[int] = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 344 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = (UnCLIPScheduler,)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]:
A_ : Union[str, Any] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCamelCase , prev_timestep=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
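        # the fixed_small_log variance grows with the timestep: ~1e-10 at t=0 up to ~1.0 by t=999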
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
A_ : Dict = scheduler_class(**_lowerCamelCase )
A_ : Dict = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCamelCase ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCamelCase ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCamelCase ) - -0.001_0011 < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
A_ : int = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter
A_ : Optional[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Any = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : List[Any] = pred_prev_sample
A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(25 )
A_ : List[str] = scheduler.timesteps
A_ : List[Any] = self.dummy_model()
A_ : List[Any] = self.dummy_sample_deter
A_ : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(_lowerCamelCase ):
# 1. predict noise residual
A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
if i + 1 == timesteps.shape[0]:
A_ : List[str] = None
else:
A_ : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
A_ : str = scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , prev_timestep=_lowerCamelCase , generator=_lowerCamelCase ).prev_sample
A_ : Optional[Any] = pred_prev_sample
A_ : Dict = torch.sum(torch.abs(_lowerCamelCase ) )
A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
| 344 | 1 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
UpperCamelCase__ : str = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
UpperCamelCase__ : Dict = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
UpperCamelCase__ : Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=1 , _lowerCamelCase="binary" , _lowerCamelCase=None , _lowerCamelCase="warn" , ) -> Tuple:
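        # thin wrapper around sklearn's recall_score; unwraps to a float when a single score is returned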
A_ : Dict = recall_score(
_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase , pos_label=_lowerCamelCase , average=_lowerCamelCase , sample_weight=_lowerCamelCase , zero_division=_lowerCamelCase , )
return {"recall": float(_lowerCamelCase ) if score.size == 1 else score}
| 344 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=False , ) -> Optional[int]:
A_ : Union[str, Any] = size if size is not None else {"""height""": 20, """width""": 20}
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ : Optional[Any] = parent
A_ : Optional[int] = batch_size
A_ : Union[str, Any] = num_channels
A_ : str = image_size
A_ : Tuple = min_resolution
A_ : Dict = max_resolution
A_ : str = do_resize
A_ : Tuple = size
A_ : int = do_center_crop
A_ : Dict = crop_size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : Optional[Any] = image_std
A_ : Any = do_reduce_labels
def UpperCAmelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
A_ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(dataset[0]["""file"""] )
A_ : Dict = Image.open(dataset[1]["""file"""] )
return image, map
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
A_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Tuple = Image.open(ds[0]["""file"""] )
A_ : List[Any] = Image.open(ds[1]["""file"""] )
A_ : Any = Image.open(ds[2]["""file"""] )
A_ : str = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BeitImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : List[Any] = BeitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
A_ : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCamelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
A_ : Optional[int] = []
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ : Union[str, Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ : List[Any] = prepare_semantic_single_inputs()
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ : str = prepare_semantic_batch_inputs()
A_ : Any = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ : Tuple = prepare_semantic_single_inputs()
A_ : str = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
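        # enabling reduce_labels maps the background class (0) to the 255 ignore index, so labels may now reach 255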
A_ : str = True
A_ : Union[str, Any] = image_processing(_lowerCamelCase , _lowerCamelCase , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 344 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase__ : str = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = ['''pixel_values''']
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ) -> None:
super().__init__(**_lowerCamelCase )
A_ : int = size if size is not None else {"""shortest_edge""": 256}
A_ : Dict = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
A_ : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Union[str, Any] = get_size_dict(_lowerCamelCase , param_name="""crop_size""" )
A_ : int = do_resize
A_ : Any = size
A_ : Optional[int] = resample
A_ : List[Any] = do_center_crop
A_ : List[Any] = crop_size
A_ : List[Any] = do_rescale
A_ : Tuple = rescale_factor
A_ : Optional[Any] = do_normalize
A_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ) -> np.ndarray:
A_ : List[Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
A_ : Any = get_resize_output_image_size(_lowerCamelCase , size=size["""shortest_edge"""] , default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) -> np.ndarray:
A_ : Optional[int] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(_lowerCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase ) -> np.ndarray:
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) -> np.ndarray:
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ) -> Optional[Any]:
A_ : Dict = do_resize if do_resize is not None else self.do_resize
A_ : str = size if size is not None else self.size
A_ : str = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
A_ : Optional[Any] = resample if resample is not None else self.resample
A_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : int = crop_size if crop_size is not None else self.crop_size
A_ : Optional[Any] = get_size_dict(_lowerCamelCase , param_name="""crop_size""" )
A_ : str = do_rescale if do_rescale is not None else self.do_rescale
A_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
A_ : List[Any] = image_mean if image_mean is not None else self.image_mean
A_ : Optional[Any] = image_std if image_std is not None else self.image_std
A_ : List[Any] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A_ : List[str] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
A_ : Optional[Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
A_ : Optional[int] = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
A_ : Tuple = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
A_ : Dict = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
A_ : Optional[int] = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
A_ : Any = {"""pixel_values""": images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[str]:
A_ : Optional[int] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(_lowerCamelCase ):
A_ : Union[str, Any] = target_sizes.numpy()
A_ : Any = []
for idx in range(len(_lowerCamelCase ) ):
A_ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_lowerCamelCase )
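            # per-pixel prediction: argmax over the class (channel) dimension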
A_ : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowerCamelCase )
else:
A_ : List[Any] = logits.argmax(dim=1 )
A_ : Optional[int] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 344 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
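# Keras layer that runs GPT-2 byte-pair tokenization in-graph, so tokenization can be serialized with a TF model.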
class _lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> str:
super().__init__()
A_ : Optional[Any] = pad_token_id
A_ : List[Any] = max_length
A_ : str = vocab
A_ : Union[str, Any] = merges
A_ : List[Any] = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> int:
A_ : Tuple = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
A_ : Dict = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> str:
A_ : Tuple = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> List[Any]:
return cls(**_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Any:
A_ : List[Any] = self.tf_tokenizer(_lowerCamelCase )
A_ : Any = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
A_ : List[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
A_ , A_ : Tuple = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 1 |
'''simple docstring'''
import math
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
A_ : Any = 0
A_ : Tuple = 0
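    # build the octal representation as a base-10 integer whose digits are the octal digits of `num`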
while num > 0:
A_ : Tuple = num % 8
        A_ : List[str] = octal + (remainder * math.floor(math.pow(1_0 , counter ) ))
counter += 1
A_ : Optional[Any] = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
    return F"0o{int(octal )}"
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(6_5 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(2_1_6 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(5_1_2 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
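# the submodules below are registered for lazy import and only loaded on first attribute access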
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['YolosFeatureExtractor']
UpperCamelCase__ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Dict = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> Optional[int]:
super().__init__(**_lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
A_ : Union[str, Any] = kwargs.pop("""encoder""" )
A_ : int = encoder_config.pop("""model_type""" )
A_ : Dict = kwargs.pop("""decoder""" )
A_ : List[str] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[int] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
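        # re-instantiate the nested configs through AutoConfig so any encoder/decoder model type is supported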
A_ : str = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : Optional[Any] = True
A_ : Optional[int] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : int = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : List[str] = self.__class__.model_type
return output
| 344 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Optional[Any] = name
A_ : Dict = value
A_ : Union[str, Any] = weight
def __repr__( self ) -> List[str]:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.value
def UpperCAmelCase_ ( self ) -> List[str]:
return self.name
def UpperCAmelCase_ ( self ) -> Tuple:
return self.weight
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.value / self.weight
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Optional[int] = []
for i in range(len(a_ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
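# Greedy knapsack: sort items by the supplied key function (e.g. value-to-weight density) in descending order, then take items while they fit in the weight budget.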
def UpperCAmelCase ( a_ , a_ , a_ ) -> List[Any]:
"""simple docstring"""
A_ : Optional[Any] = sorted(a_ , key=a_ , reverse=a_ )
A_ : str = []
A_ , A_ : Dict = 0.0, 0.0
for i in range(len(a_ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ : Optional[Any] = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : int = ['ViTFeatureExtractor']
UpperCamelCase__ : Optional[Any] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[int] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[int] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase ( a_ , a_ ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 344 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = XLMTokenizer
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ : Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
A_ : str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
A_ : Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCamelCase ) )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Union[str, Any]:
A_ : List[Any] = """lower newer"""
A_ : Dict = """lower newer"""
return input_text, output_text
def UpperCAmelCase_ ( self ) -> int:
A_ : List[str] = XLMTokenizer(self.vocab_file , self.merges_file )
A_ : Tuple = """lower"""
A_ : Any = ["""low""", """er</w>"""]
A_ : Tuple = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = tokens + ["""<unk>"""]
A_ : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
A_ : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase )
A_ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase )
A_ : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
A_ : Any = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 344 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
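# Shared test helper that exercises a config class: common properties, JSON round-trips, and save_pretrained/from_pretrained.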
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase ) -> Any:
A_ : List[Any] = parent
A_ : int = config_class
A_ : int = has_text_modality
A_ : str = kwargs
A_ : int = common_properties
def UpperCAmelCase_ ( self ) -> str:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : Optional[int] = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) , msg=F"`{prop}` does not exist" )
# Test that config has the common properties as setter
for idx, name in enumerate(_lowerCamelCase ):
try:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.parent.assertEqual(
getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name} value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(_lowerCamelCase ):
try:
A_ : List[str] = self.config_class(**{name: idx} )
self.parent.assertEqual(
                    getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , msg=F"`{name}` value {idx} expected, but was {getattr(_lowerCamelCase , _lowerCamelCase )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = self.config_class(**self.inputs_dict )
A_ : Optional[int] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = os.path.join(_lowerCamelCase , """config.json""" )
config_first.to_json_file(_lowerCamelCase )
A_ : Dict = self.config_class.from_json_file(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Any = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(_lowerCamelCase )
A_ : Union[str, Any] = self.config_class.from_pretrained(_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[int] = self.config_class(**self.inputs_dict )
A_ : List[Any] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase )
config_first.save_pretrained(_lowerCamelCase )
A_ : Any = self.config_class.from_pretrained(_lowerCamelCase , subfolder=_lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
A_ : str = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_class.is_composition:
return
A_ : Dict = self.config_class()
self.parent.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = copy.deepcopy(_lowerCamelCase )
A_ : Tuple = self.config_class(**_lowerCamelCase )
A_ : Optional[Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(_lowerCamelCase , _lowerCamelCase ) != value:
wrong_values.append((key, getattr(_lowerCamelCase , _lowerCamelCase ), value) )
if len(_lowerCamelCase ) > 0:
A_ : List[Any] = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
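# Usage sketch (illustrative; this helper is conventionally named ConfigTester,
# and the aggregate method above is its `run_common_tests`): a model test class
# builds it with its config class plus config kwargs and calls the runner, e.g.
#
#     config_tester = ConfigTester(self, config_class=DeiTConfig, hidden_size=37)
#     config_tester.run_common_tests()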
| 344 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : str = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''unispeech'''
def __init__( self , _lowerCamelCase=32 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCamelCase=False , _lowerCamelCase=128 , _lowerCamelCase=16 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.05 , _lowerCamelCase=10 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=10 , _lowerCamelCase=0 , _lowerCamelCase=320 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=100 , _lowerCamelCase=256 , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=256 , _lowerCamelCase=80 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=0.5 , **_lowerCamelCase , ) -> Optional[Any]:
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase )
A_ : Any = hidden_size
A_ : Any = feat_extract_norm
A_ : Dict = feat_extract_activation
A_ : Any = list(_lowerCamelCase )
A_ : Tuple = list(_lowerCamelCase )
A_ : Union[str, Any] = list(_lowerCamelCase )
A_ : List[Any] = conv_bias
A_ : List[str] = num_conv_pos_embeddings
A_ : Dict = num_conv_pos_embedding_groups
A_ : Union[str, Any] = len(self.conv_dim )
A_ : List[Any] = num_hidden_layers
A_ : Optional[int] = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : Tuple = num_attention_heads
A_ : Optional[int] = hidden_dropout
A_ : Optional[int] = attention_dropout
A_ : List[str] = activation_dropout
A_ : int = feat_proj_dropout
A_ : List[str] = final_dropout
A_ : Optional[int] = layerdrop
A_ : Optional[int] = layer_norm_eps
A_ : Tuple = initializer_range
A_ : Dict = num_ctc_classes
A_ : Any = vocab_size
A_ : Union[str, Any] = do_stable_layer_norm
A_ : Tuple = use_weighted_layer_sum
A_ : List[Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : Optional[int] = apply_spec_augment
A_ : Dict = mask_time_prob
A_ : Tuple = mask_time_length
A_ : Optional[Any] = mask_time_min_masks
A_ : List[str] = mask_feature_prob
A_ : Any = mask_feature_length
A_ : str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
A_ : Tuple = num_codevectors_per_group
A_ : Union[str, Any] = num_codevector_groups
A_ : Dict = contrastive_logits_temperature
A_ : List[str] = feat_quantizer_dropout
A_ : Optional[Any] = num_negatives
A_ : Optional[Any] = codevector_dim
A_ : Dict = proj_codevector_dim
A_ : Union[str, Any] = diversity_loss_weight
# ctc loss
A_ : int = ctc_loss_reduction
A_ : Optional[Any] = ctc_zero_infinity
# pretraining loss
A_ : Any = replace_prob
@property
def UpperCAmelCase_ ( self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
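# Worked example (a sketch, not part of the config class): with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above reduces to
# 5 * 2**6 == 320, i.e. one encoder output frame per 320 input samples, which
# is 20 ms of audio if the waveform is sampled at 16 kHz (an assumption; the
# sampling rate is not stored in this config):
#
#     import functools, operator
#     functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)  # -> 320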
| 344 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
try:
with open(a_ , """rb""" ) as flax_state_f:
A_ : Tuple = from_bytes(a_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(a_ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(a_ , a_ )
def UpperCAmelCase ( a_ , a_ ) -> Any:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
A_ : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda a_ : x.dtype == jnp.bfloataa , a_ ) ).values()
if any(a_ ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
A_ : str = jax.tree_util.tree_map(
lambda a_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , a_ )
A_ : Any = """"""
A_ : Optional[int] = flatten_dict(a_ , sep=""".""" )
A_ : List[str] = pt_model.state_dict()
# keep track of unexpected & missing keys
A_ : Union[str, Any] = []
A_ : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A_ : List[Any] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A_ : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[Any] = jnp.transpose(a_ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
A_ : int = flax_key_tuple_array[:-1] + ["""weight"""]
A_ : Optional[int] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A_ : Any = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(a_ ):
A_ : Tuple = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
A_ : Dict = """.""".join(a_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
A_ : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ , np.ndarray ) else flax_tensor
A_ : Tuple = torch.from_numpy(a_ )
# remove from missing keys
missing_keys.remove(a_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(a_ )
pt_model.load_state_dict(a_ )
# re-transform missing_keys to list
A_ : Dict = list(a_ )
if len(a_ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(a_ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
return pt_model
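# Shape illustration for the kernel conversion above (a standalone sketch):
# Flax stores 2D conv kernels as (H, W, C_in, C_out) while PyTorch expects
# (C_out, C_in, H, W), hence the (3, 2, 0, 1) transpose; dense kernels are
# (in, out) in Flax and (out, in) in PyTorch, hence the plain .T.
#
#     import numpy as np
#     flax_kernel = np.zeros((3, 3, 16, 32))          # (H, W, C_in, C_out)
#     np.transpose(flax_kernel, (3, 2, 0, 1)).shape   # -> (32, 16, 3, 3)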
| 344 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : Dict = UniSpeechSatForSequenceClassification.from_pretrained(a_ , config=a_ )
A_ : int = downstream_dict["""projector.weight"""]
A_ : List[str] = downstream_dict["""projector.bias"""]
A_ : List[str] = downstream_dict["""model.post_net.linear.weight"""]
A_ : Optional[int] = downstream_dict["""model.post_net.linear.bias"""]
return model
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = UniSpeechSatForAudioFrameClassification.from_pretrained(a_ , config=a_ )
A_ : Dict = downstream_dict["""model.linear.weight"""]
A_ : Tuple = downstream_dict["""model.linear.bias"""]
return model
def UpperCAmelCase ( a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Optional[int] = UniSpeechSatForXVector.from_pretrained(a_ , config=a_ )
A_ : Optional[Any] = downstream_dict["""connector.weight"""]
A_ : str = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
A_ : Dict = downstream_dict[
F"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
A_ : str = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
A_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
A_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
A_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
A_ : Tuple = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
A_ : Any = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Optional[Any] = torch.load(a_ , map_location="""cpu""" )
A_ : Optional[Any] = checkpoint["""Downstream"""]
A_ : Optional[int] = UniSpeechSatConfig.from_pretrained(a_ )
A_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
a_ , return_attention_mask=a_ , do_normalize=a_ )
A_ : List[str] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
A_ : Any = convert_classification(a_ , a_ , a_ )
elif arch.endswith("""ForAudioFrameClassification""" ):
A_ : Any = convert_diarization(a_ , a_ , a_ )
elif arch.endswith("""ForXVector""" ):
A_ : Optional[int] = convert_xvector(a_ , a_ , a_ )
else:
raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
A_ : Any = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(a_ )
hf_model.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
UpperCamelCase__ : List[str] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
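# Example invocation (the script name, paths and base model are hypothetical,
# shown only to illustrate the four required arguments):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./classifier_config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted_model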
| 344 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Dict = TFRegNetModel(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Optional[Any] = self.num_labels
A_ : int = TFRegNetForImageClassification(_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = TFRegNetModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Dict = layer_type
A_ : List[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TFRegNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" )
# forward pass
A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase )
# verify the logits
A_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
| 344 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''open-llama'''
def __init__( self , _lowerCamelCase=10_0000 , _lowerCamelCase=4096 , _lowerCamelCase=1_1008 , _lowerCamelCase=32 , _lowerCamelCase=32 , _lowerCamelCase="silu" , _lowerCamelCase=2048 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-6 , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ) -> Optional[int]:
A_ : List[str] = vocab_size
A_ : Dict = max_position_embeddings
A_ : Dict = hidden_size
A_ : List[Any] = intermediate_size
A_ : int = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Any = hidden_act
A_ : int = initializer_range
A_ : Dict = rms_norm_eps
A_ : Optional[Any] = use_cache
A_ : int = kwargs.pop(
"""use_memorry_efficient_attention""" , _lowerCamelCase )
A_ : Dict = hidden_dropout_prob
A_ : Optional[Any] = attention_dropout_prob
A_ : Optional[int] = use_stable_embedding
A_ : List[Any] = shared_input_output_embedding
A_ : int = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase , )
def UpperCAmelCase_ ( self ) -> List[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F"got {self.rope_scaling}" )
A_ : List[Any] = self.rope_scaling.get("""type""" , _lowerCamelCase )
A_ : Union[str, Any] = self.rope_scaling.get("""factor""" , _lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_lowerCamelCase , _lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 344 |
'''simple docstring'''
def solution( n = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'{solution() = }')
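# Brute-force cross-check of the closed forms above (a sketch, not part of
# the original solution): for n = 10 the difference is 3025 - 385 = 2640.
#
#     n = 10
#     brute = sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
#     assert brute == solution(n) == 2640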
| 344 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''funnel'''
lowerCamelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=[4, 4, 4] , _lowerCamelCase=None , _lowerCamelCase=2 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=64 , _lowerCamelCase=3072 , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=None , _lowerCamelCase=1e-9 , _lowerCamelCase="mean" , _lowerCamelCase="relative_shift" , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , **_lowerCamelCase , ) -> Optional[int]:
A_ : Optional[Any] = vocab_size
A_ : List[str] = block_sizes
A_ : str = [1] * len(_lowerCamelCase ) if block_repeats is None else block_repeats
assert len(_lowerCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
A_ : str = num_decoder_layers
A_ : Tuple = d_model
A_ : Dict = n_head
A_ : Dict = d_head
A_ : Any = d_inner
A_ : Dict = hidden_act
A_ : int = hidden_dropout
A_ : Optional[int] = attention_dropout
A_ : Dict = activation_dropout
A_ : Tuple = initializer_range
A_ : List[str] = initializer_std
A_ : int = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
A_ : List[str] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
A_ : str = attention_type
A_ : List[str] = separate_cls
A_ : Optional[Any] = truncate_seq
A_ : Dict = pool_q_only
super().__init__(**_lowerCamelCase )
@property
def UpperCAmelCase_ ( self ) -> List[Any]:
return sum(self.block_sizes )
@num_hidden_layers.setter
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Optional[int]:
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def UpperCAmelCase_ ( self ) -> List[str]:
return len(self.block_sizes )
@num_blocks.setter
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Dict:
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCamelCase__ : int = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
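# Illustration of what the lazy module buys (names as exported above): each
# submodule is only imported when one of its attributes is first accessed, e.g.
#
#     from transformers.onnx import OnnxConfig  # triggers the import of .config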
| 344 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Dict = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
A_ : Tuple = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
A_ : Union[str, Any] = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
A_ : List[str] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A_ : Optional[Any] = model(_lowerCamelCase )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCamelCase , atol=1e-3 ) )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
A_ : List[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
A_ : List[Any] = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
A_ : Optional[int] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
A_ : int = model(_lowerCamelCase )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCamelCase , atol=1e-3 ) )
| 344 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks( protein ) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14 )
    restype_atom37_to_atom14_list.append([0] * 37 )
    restype_atom14_mask_list.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein["""aatype"""].device , )
    protein_aatype = protein["""aatype"""].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["""atom14_atom_exists"""] = residx_atom14_mask
    protein["""residx_atom14_to_atom37"""] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["""residx_atom37_to_atom14"""] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein["""aatype"""].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["""atom37_atom_exists"""] = residx_atom37_mask
    return protein
def make_atom14_masks_np( batch ) -> Dict[str, np.ndarray]:
    """NumPy wrapper: lifts array leaves to tensors, maps, and converts back."""
    batch = tree_map(lambda n: torch.tensor(n , device=batch["""aatype"""].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
    return out
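# Usage sketch (illustrative): given a feature dict whose """aatype""" entry
# holds per-residue amino-acid indices, make_atom14_masks_np lifts NumPy
# leaves to torch tensors, runs make_atom14_masks to add the atom14/atom37
# index and existence-mask fields (e.g. """atom14_atom_exists""" of shape
# (num_res, 14)), and converts everything back to NumPy.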
| 344 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase__ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
UpperCamelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ) -> str:
A_ : Optional[int] = parent
A_ : Dict = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = patch_size
A_ : List[str] = num_channels
A_ : List[Any] = is_training
A_ : Union[str, Any] = use_labels
A_ : Union[str, Any] = hidden_size
A_ : str = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Any = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Dict = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : str = scope
A_ : Optional[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A_ : Tuple = (image_size // patch_size) ** 2
A_ : Union[str, Any] = num_patches + 2
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> int:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[str] = DeiTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : int = DeiTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Dict = 1
A_ : Optional[int] = DeiTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : int = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : Tuple = self.type_sequence_label_size
A_ : Tuple = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : int = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Dict = 1
A_ : Any = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = DeiTModelTester(self )
A_ : str = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_lowerCamelCase )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Union[str, Any]:
A_ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if not self.model_tester.is_training:
return
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : List[str] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A_ : Any = False
A_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
A_ : List[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Union[str, Any] = model(**_lowerCamelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
A_ : Dict = problem_type["""title"""]
A_ : List[Any] = problem_type["""num_labels"""]
A_ : List[str] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
A_ : Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
A_ : Union[str, Any] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
A_ : List[str] = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
A_ : Optional[int] = self.default_image_processor
A_ : str = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
A_ : Any = model(**_lowerCamelCase )
# verify the logits
A_ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : List[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : int = self.default_image_processor
A_ : List[str] = prepare_img()
A_ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
A_ : Union[str, Any] = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : List[Any] = model(_lowerCamelCase )
| 344 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key ( state_dict , old , new ) -> Optional[Any]:
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys ( state_dict ) -> Dict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v ( state_dict , is_panoptic=False ) -> Optional[int]:
    """simple docstring"""
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:2_5_6, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:2_5_6]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[2_5_6:5_1_2]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-2_5_6:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-2_5_6:]
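# Illustrative sketch (function name hypothetical, not part of the original script):
# how a fused in_proj matrix of shape (3 * hidden, hidden) splits into equal
# query/key/value blocks, assuming hidden = 256 as in the slices above.
def _demo_qkv_split():
    import torch

    hidden = 256
    in_proj_weight = torch.randn(3 * hidden, hidden)
    q = in_proj_weight[:hidden, :]
    k = in_proj_weight[hidden : 2 * hidden, :]
    v = in_proj_weight[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)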
def prepare_img ( ) -> Dict:
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint ( model_name , pytorch_dump_folder_path ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
A_ : str = {int(a_ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
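# Example invocation (illustrative; assumes this script is saved as
# convert_conditional_detr_checkpoint.py and the imports above are installed):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50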
| 344 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 32 , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowerCamelCase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowerCamelCase = True , _lowerCamelCase=7 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=3 , ) -> Union[str, Any]:
A_ : Optional[int] = parent
A_ : Union[str, Any] = do_resize
A_ : Optional[Any] = size if size is not None else {"""shortest_edge""": 288}
A_ : Tuple = size_divisor
A_ : List[Any] = do_rescale
A_ : Dict = rescale_factor
A_ : List[Any] = do_normalize
A_ : Dict = do_center_crop
A_ : Optional[Any] = image_mean
A_ : List[str] = image_std
A_ : str = do_pad
A_ : Any = batch_size
A_ : List[str] = num_channels
A_ : List[str] = min_resolution
A_ : Union[str, Any] = max_resolution
def UpperCAmelCase_ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[int]:
if not batched:
A_ : Union[str, Any] = self.size["""shortest_edge"""]
A_ : Dict = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
A_ , A_ : Optional[Any] = image.size
else:
A_ , A_ : int = image.shape[1], image.shape[2]
A_ : Optional[int] = size / min(_lowerCamelCase , _lowerCamelCase )
if h < w:
A_ , A_ : Optional[Any] = size, scale * w
else:
A_ , A_ : Dict = scale * h, size
A_ : Union[str, Any] = int((1333 / 800) * size )
if max(_lowerCamelCase , _lowerCamelCase ) > max_size:
A_ : str = max_size / max(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = newh * scale
A_ : Dict = neww * scale
A_ , A_ : str = int(newh + 0.5 ), int(neww + 0.5 )
A_ , A_ : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
A_ : Tuple = []
for image in image_inputs:
A_ , A_ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0]
A_ : Tuple = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1]
return expected_height, expected_width
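# Minimal standalone sketch of the shortest-edge resize rule exercised above
# (function name hypothetical; assumes shortest_edge=288, the 1333/800 max-size
# cap and size_divisor=32 used by the tester, mirroring the helper logic).
def _sketch_shortest_edge_resize(h, w, size=288, size_divisor=32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor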
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
A_ : int = BridgeTowerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCamelCase , """size_divisor""" ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image processor
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Optional[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image processor
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : int = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image processor
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ : List[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 344 | 1 |
'''simple docstring'''
def heaps ( arr ) -> list:
    """simple docstring"""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k , arr ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
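# Quick sanity check (illustrative): Heap's algorithm must enumerate each
# permutation exactly once, so the result matches itertools.permutations up to order.
#   import itertools
#   assert sorted(heaps([1, 2, 3])) == sorted(itertools.permutations([1, 2, 3]))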
| 344 |
'''simple docstring'''
def _print_dist ( dist , v ) -> Optional[int]:
    """simple docstring"""
    print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float("""inf""" ):
                print(int(dist[i][j] ) , end="""\t""" )
            else:
                print("""INF""" , end="""\t""" )
        print()
def floyd_warshall ( graph , v ) -> Tuple:
    """simple docstring"""
    dist = [[float("""inf""" ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float("""inf""" )
                    and dist[k][j] != float("""inf""" )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be less than v (the number of vertices);
    # failure to follow this will result in an IndexError
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
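# Programmatic equivalent of the session above (illustrative, no stdin needed):
#   INF = float('inf')
#   graph = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
#   dist, _ = floyd_warshall(graph, 3)
#   assert dist[1][2] == 2.0 and dist[2][1] == 1.0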
| 344 | 1 |
'''simple docstring'''
from __future__ import annotations
import requests
UpperCamelCase__ : str = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data ( subreddit , limit = 1 , age = "new" , wanted_data = None ) -> dict:
    """simple docstring"""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = F"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg )
    response = requests.get(
        F"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}" , headers={"""User-agent""": """A random string"""} , )
    if response.status_code == 4_2_9:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
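# Returned mapping shape (illustrative, hypothetical values):
#   {0: {"title": "...", "url": "...", "selftext": "..."}}
# A 429 status means the client is rate limited; waiting and retrying is the
# usual remedy, as the note above suggests.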
| 344 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def UpperCAmelCase_ ( self , predictions , references ) -> List[Any]:
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
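# Sketch of the wrapping done above (illustrative values): predictions flatten to
# {example_id: answer_text}, while references are nested into the
# {"paragraphs": [{"qas": [...]}]} layout expected by the official SQuAD v1 script.
#   pred_dict = {"56e10a3be3433e1400422b22": "1976"}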
| 344 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> int:
A_ : str = tempfile.mkdtemp()
A_ : Union[str, Any] = BlipImageProcessor()
A_ : Dict = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Dict = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Any = InstructBlipProcessor(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).tokenizer
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).image_processor
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ).qformer_tokenizer
def UpperCAmelCase_ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> List[str]:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ) -> Any:
A_ : Tuple = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Union[str, Any] = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
self.assertIsInstance(processor.qformer_tokenizer , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[Any] = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase )
A_ : Tuple = self.prepare_image_inputs()
A_ : Any = image_processor(_lowerCamelCase , return_tensors="""np""" )
A_ : List[Any] = processor(images=_lowerCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase )
A_ : int = """lower newer"""
A_ : Optional[int] = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
A_ : Any = qformer_tokenizer(_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def UpperCAmelCase_ ( self ) -> Any:
A_ : List[Any] = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[Any] = self.get_qformer_tokenizer()
A_ : Union[str, Any] = InstructBlipProcessor(
tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase )
A_ : Union[str, Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Optional[int] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : List[str] = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : Optional[Any] = self.get_qformer_tokenizer()
A_ : Dict = InstructBlipProcessor(
tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase )
A_ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[Any] = processor.batch_decode(_lowerCamelCase )
A_ : Any = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
A_ : List[Any] = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : Union[str, Any] = InstructBlipProcessor(
tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase , qformer_tokenizer=_lowerCamelCase )
A_ : Union[str, Any] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : int = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase__ : Any = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[Any] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCamelCase__ : List[str] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCamelCase__ : str = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCamelCase__ : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
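# Minimal sketch of the lazy-import pattern used above (illustrative, not the real
# transformers._LazyModule): the placeholder module imports a submodule only when
# one of its declared attributes is first accessed.
#   import importlib
#   class _LazyModuleSketch:
#       def __init__(self, pkg_name, structure):
#           self._pkg_name, self._structure = pkg_name, structure
#       def __getattr__(self, attr):
#           for submodule, names in self._structure.items():
#               if attr in names:
#                   module = importlib.import_module(f".{submodule}", self._pkg_name)
#                   return getattr(module, attr)
#           raise AttributeError(attr)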
| 344 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[str] = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''blenderbot-small'''
lowerCamelCase = ['''past_key_values''']
lowerCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=512 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=8 , _lowerCamelCase=2048 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=512 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=2 , **_lowerCamelCase , ) -> List[Any]:
A_ : str = vocab_size
A_ : Union[str, Any] = max_position_embeddings
A_ : Union[str, Any] = d_model
A_ : Any = encoder_ffn_dim
A_ : Optional[int] = encoder_layers
A_ : Dict = encoder_attention_heads
A_ : int = decoder_ffn_dim
A_ : str = decoder_layers
A_ : Dict = decoder_attention_heads
A_ : Union[str, Any] = dropout
A_ : Tuple = attention_dropout
A_ : List[str] = activation_dropout
A_ : Optional[int] = activation_function
A_ : Union[str, Any] = init_std
A_ : Union[str, Any] = encoder_layerdrop
A_ : Union[str, Any] = decoder_layerdrop
A_ : str = use_cache
A_ : Union[str, Any] = encoder_layers
A_ : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
A_ : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
A_ : str = {0: """batch"""}
A_ : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A_ : int = {0: """batch""", 1: """decoder_sequence"""}
A_ : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A_ : List[str] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
A_ , A_ : List[str] = self.num_layers
for i in range(_lowerCamelCase ):
A_ : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
A_ : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
A_ : str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
A_ : Optional[int] = super().outputs
else:
A_ : str = super(_lowerCamelCase , self ).outputs
if self.use_past:
A_ , A_ : List[Any] = self.num_layers
for i in range(_lowerCamelCase ):
A_ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
A_ : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
A_ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Generate decoder inputs
A_ : Optional[int] = seq_length if not self.use_past else 1
A_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : List[str] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A_ : Optional[int] = dict(**_lowerCamelCase , **_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A_ , A_ : int = common_inputs["""input_ids"""].shape
A_ : Union[str, Any] = common_inputs["""decoder_input_ids"""].shape[1]
A_ , A_ : List[Any] = self.num_attention_heads
A_ : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : List[Any] = decoder_seq_length + 3
A_ : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A_ : str = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 )
A_ : int = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A_ , A_ : Optional[Any] = self.num_layers
A_ : Tuple = min(_lowerCamelCase , _lowerCamelCase )
A_ : Any = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers
A_ : List[str] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
A_ : List[str] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowerCamelCase , _lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
A_ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A_ , A_ : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A_ : List[Any] = seqlen + 2
A_ , A_ : List[str] = self.num_layers
A_ , A_ : List[Any] = self.num_attention_heads
A_ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : Any = common_inputs["""attention_mask"""].dtype
A_ : Tuple = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
A_ : Tuple = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A_ : List[str] = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A_ : Tuple = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
A_ : Dict = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
A_ : int = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
A_ : Dict = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
A_ : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
elif self.task == "causal-lm":
A_ : List[str] = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
else:
A_ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
if self.task in ["default", "seq2seq-lm"]:
A_ : List[Any] = super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
A_ : str = super(_lowerCamelCase , self )._flatten_past_key_values_(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
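# Illustrative sketch (function name and sizes assumed) of the past_key_values
# dummy shapes built above: each layer gets a (key, value) pair of zeros shaped
# (batch, num_heads, past_length, hidden_size // num_heads).
def _demo_past_key_values_shape():
    import torch

    batch, num_heads, seqlen, hidden = 2, 16, 7, 512
    past_length = seqlen + 2  # mirrors the "+ 2" offset used in the causal-lm branch
    shape = (batch, num_heads, past_length, hidden // num_heads)
    past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(8)]
    assert past_key_values[0][0].shape == (2, 16, 9, 32)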
| 344 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''vision-encoder-decoder'''
lowerCamelCase = True
def __init__( self , **_lowerCamelCase ) -> str:
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                F"A configuration of type {self.model_type} cannot be instantiated because "
F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
A_ : Optional[int] = kwargs.pop("""encoder""" )
A_ : List[str] = encoder_config.pop("""model_type""" )
A_ : str = kwargs.pop("""decoder""" )
A_ : Optional[Any] = decoder_config.pop("""model_type""" )
A_ : List[str] = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = True
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
A_ : int = True
A_ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : List[str] = self.encoder.to_dict()
A_ : Union[str, Any] = self.decoder.to_dict()
A_ : str = self.__class__.model_type
return output
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
A_ : Optional[int] = OrderedDict()
A_ : List[Any] = super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
A_ , A_ : str = dummy_input["""input_ids"""].shape
A_ : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
A_ : Union[str, Any] = dummy_input.pop("""input_ids""" )
A_ : List[str] = dummy_input.pop("""attention_mask""" )
A_ : Optional[int] = torch.zeros(_lowerCamelCase )
return common_inputs
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
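# Hypothetical construction sketch (names illustrative) for the composite config
# defined above: the constructor requires `encoder` and `decoder` dicts that each
# carry a "model_type" key, which AutoConfig.for_model resolves to a concrete class.
#   config = VisionEncoderDecoderConfig(
#       encoder={"model_type": "vit"}, decoder={"model_type": "gpt2"}
#   )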
| 344 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _lowerCAmelCase :
"""simple docstring"""
    def __init__( self , a=2 , b=3 , length=64 , seed=None ) -> int:
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
    def __len__( self ) -> List[str]:
        return self.length
    def __getitem__( self , i ) -> Optional[int]:
        return {"x": self.x[i], "y": self.y[i]}
class _lowerCAmelCase ( torch.nn.Module ):
"""simple docstring"""
    def __init__( self , a=0 , b=0 , double_output=False ) -> Any:
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ) -> Tuple:
        if self.first_batch:
            print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class _lowerCAmelCase ( torch.nn.Module ):
"""simple docstring"""
    def __init__( self , a=0 , b=0 , double_output=False ) -> List[str]:
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ) -> int:
        if self.first_batch:
            print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
            self.first_batch = False
        return x * self.a + self.b
def UpperCAmelCase ( accelerator , batch_size = 1_6 ) -> List[str]:
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    data_files = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    datasets = load_dataset("""csv""" , data_files=data_files )
    label_list = datasets["""train"""].unique("""label""" )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
        if "label" in examples:
            outputs["""labels"""] = [label_to_id[l] for l in examples["""label"""]]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
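# Padding trade-off illustrated by collate_fn above: "max_length" padding keeps
# tensor shapes static, which avoids recompilation on TPU/XLA, while "longest"
# padding elsewhere saves compute by padding only to the longest sample per batch.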
| 344 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase__ : Any = '\\n Text data.\n Second line of data.'
UpperCamelCase__ : List[Any] = 'file'
@pytest.fixture(scope="""session""" )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : int = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
A_ : int = bytes(a_ , """utf-8""" )
with zstd.open(a_ , """wb""" ) as f:
f.write(a_ )
return path
@pytest.fixture
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , a_ ) , """w""" ) as f:
f.write(a_ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : List[str] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
A_ : Any = input_paths[compression_format]
A_ : Tuple = tmp_path / """cache"""
A_ : Tuple = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_ )
A_ : Dict = cached_path(a_ , download_config=a_ )
with open(a_ ) as f:
A_ : Optional[Any] = f.read()
with open(a_ ) as f:
A_ : List[str] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
A_ : Union[str, Any] = """custom_cache"""
A_ : List[str] = """custom_extracted_dir"""
A_ : Optional[Any] = tmp_path / """custom_extracted_path"""
if default_extracted:
A_ : Any = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , a_ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(a_ ) )
A_ : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ : List[Any] = xz_file
A_ : Optional[int] = (
DownloadConfig(extract_compressed_file=a_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_ )
)
A_ : Union[str, Any] = cached_path(a_ , download_config=a_ )
assert Path(a_ ).parent.parts[-2:] == expected
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : str = str(Path(a_ ).resolve() )
assert cached_path(a_ ) == text_file
# relative path
A_ : List[str] = str(Path(a_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(a_ ) == text_file
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : Optional[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(a_ ):
cached_path(a_ )
# relative path
A_ : Tuple = """./__missing_file__.txt"""
with pytest.raises(a_ ):
cached_path(a_ )
def UpperCAmelCase ( a_ ) -> Tuple:
"""simple docstring"""
A_ : Any = get_from_cache(F"tmp://{tmpfs_file}" )
with open(a_ ) as f:
A_ : List[str] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with pytest.raises(a_ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
http_get("""https://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
ftp_get("""ftp://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , a_ )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(a_ ):
fsspec_get("""s3://huggingface.co""" , temp_file=a_ )
with pytest.raises(a_ ):
fsspec_head("""s3://huggingface.co""" )
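# Pattern note (illustrative): each offline test patches HF_DATASETS_OFFLINE and
# asserts that every transport helper (http/ftp/fsspec) raises OfflineModeIsEnabled
# before any network I/O is attempted; `temp_file` receives a throwaway path so a
# failed call cannot leave partial downloads behind.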
| 344 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase ( matrix ) -> int:
    """simple docstring"""
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
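# Worked example (illustrative): the cheapest right/down path through the grid
# below is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7. The function above mutates
# its argument in place, hence the defensive row copies.
#   grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
#   assert UpperCAmelCase([row[:] for row in grid]) == 7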
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ : int = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''distilbert'''
lowerCamelCase = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
    def __init__( self , vocab_size=3_0522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **_lowerCamelCase , ) -> Optional[Any]:
A_ : Tuple = vocab_size
A_ : List[Any] = max_position_embeddings
A_ : int = sinusoidal_pos_embds
A_ : int = n_layers
A_ : str = n_heads
A_ : Optional[int] = dim
A_ : int = hidden_dim
A_ : Tuple = dropout
A_ : List[Any] = attention_dropout
A_ : int = activation
A_ : Dict = initializer_range
A_ : List[Any] = qa_dropout
A_ : int = seq_classif_dropout
        super().__init__(**_lowerCamelCase , pad_token_id=pad_token_id )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 344 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def UpperCAmelCase ( monkeypatch ) -> Any:
"""simple docstring"""
monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" , set() )
@pytest.fixture
def UpperCAmelCase ( monkeypatch ) -> List[Any]:
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) -> List[Any]:
A_ : List[str] = metric_id
class _lowerCAmelCase :
"""simple docstring"""
    lowerCamelCase = [MetricMock(metric_id ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def UpperCAmelCase_ ( self ) -> str:
return self._metrics
monkeypatch.setattr("""datasets.inspect.huggingface_hub""" , HfhMock() )
@pytest.mark.parametrize(
"""func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def UpperCAmelCase ( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ) -> List[str]:
    """simple docstring"""
    if "tmp_path" in args:
        args = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="""https://huggingface.co/docs/evaluate""" ):
        func(*args )
| 344 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict , old , new ) -> Optional[Any]:
    """simple docstring"""
    A_ : int = state_dict.pop(old )
    state_dict[new] = A_
def rename_backbone_keys(state_dict ) -> Dict:
    """simple docstring"""
    A_ : Union[str, Any] = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            A_[new_key] = value
        else:
            A_[key] = value
    return A_
def read_in_q_k_v(state_dict , is_panoptic=False ) -> Optional[int]:
    """simple docstring"""
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:2_5_6, :]
        state_dict[F"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:2_5_6]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[2_5_6:5_1_2, :]
        state_dict[F"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[2_5_6:5_1_2]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-2_5_6:, :]
        state_dict[F"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-2_5_6:]
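# Background (not in the original file): PyTorch's nn.MultiheadAttention keeps the
# query/key/value projections stacked in a single (3*256, 256) in_proj matrix, so the
# slices [:256], [256:512] and [-256:] above recover the separate q/k/v weights.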
def prepare_img( ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    A_ : List[Any] = Image.open(requests.get(A_ , stream=True ).raw )
    return A_
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name , pytorch_dump_folder_path ) -> Dict:
"""simple docstring"""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = """resnet101"""
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = """panoptic""" in model_name
    if is_panoptic:
        config.num_labels = 2_5_0
    else:
        config.num_labels = 9_1
    repo_id = """huggingface/label-files"""
    filename = """coco-detection-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
    image_processor = ConditionalDetrImageProcessor(format=A_ )
    # prepare image
    A_ : Tuple = prepare_img()
    encoding = image_processor(images=A_ , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
    conditional_detr = torch.hub.load("""DeppMeng/ConditionalDETR""" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization="""DepuMeng""" , commit_message="""Add model""" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
    # Save model and image processor
    logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 344 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCamelCase__ : Dict = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None ) -> Optional[Any]:
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
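# For example (illustrative, not in the original file):
#     foo: List[int] = list_field(default=[1, 2, 3])
# gives every dataclass instance a fresh [1, 2, 3] via default_factory, sidestepping
# the mutable-default error that a plain `field(default=[1, 2, 3])` would raise.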
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
lowerCamelCase = 42
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = field(default='''toto''', metadata={'''help''': '''help message'''} )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''titi'''
lowerCamelCase = '''toto'''
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''titi'''
lowerCamelCase = '''toto'''
lowerCamelCase = 42
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = "toto"
def UpperCAmelCase_ ( self ) -> Any:
A_ : int = BasicEnum(self.foo )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = "toto"
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Any = MixedTypeEnum(self.foo )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = None
lowerCamelCase = field(default=__A, metadata={'''help''': '''help message'''} )
lowerCamelCase = None
lowerCamelCase = list_field(default=[] )
lowerCamelCase = list_field(default=[] )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = list_field(default=[] )
lowerCamelCase = list_field(default=[1, 2, 3] )
lowerCamelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
lowerCamelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = field()
lowerCamelCase = field()
lowerCamelCase = field()
def UpperCAmelCase_ ( self ) -> Any:
A_ : List[Any] = BasicEnum(self.required_enum )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = 42
lowerCamelCase = field()
lowerCamelCase = None
lowerCamelCase = field(default='''toto''', metadata={'''help''': '''help message'''} )
lowerCamelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = None
lowerCamelCase = field(default=__A, metadata={'''help''': '''help message'''} )
lowerCamelCase = None
lowerCamelCase = list_field(default=[] )
lowerCamelCase = list_field(default=[] )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def UpperCAmelCase_ ( self , a , b ) -> Optional[Any]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
A_ : List[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != """container"""}
A_ : Any = {k: v for k, v in vars(_lowerCamelCase ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , _lowerCamelCase ) and yy.get("""choices""" , _lowerCamelCase ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](_lowerCamelCase ) , yy["""type"""](_lowerCamelCase ) )
del xx["type"], yy["type"]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument("""--bar""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument("""--baz""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument("""--flag""" , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs="""?""" )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
        ((A_) , ) = parser.parse_args_into_dataclasses(A_ , look_for_args_file=False )
        self.assertFalse(A_.flag )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Dict = HfArgumentParser(_lowerCamelCase )
A_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=_lowerCamelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=_lowerCamelCase , help="""help message""" )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : Any = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs="""?""" )
expected.add_argument("""--baz""" , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=_lowerCamelCase , dest="""baz""" )
expected.add_argument("""--opt""" , type=_lowerCamelCase , default=_lowerCamelCase )
A_ : Optional[Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : int = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Optional[Any] = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Optional[int] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Tuple = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
A_ : Dict = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Dict = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
A_ : Dict = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
A_ : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
A_ : Optional[Any] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
A_ : Union[str, Any] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
A_ : Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCAmelCase_ ( self ) -> int:
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCamelCase = "toto"
A_ : Dict = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
A_ : Tuple = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
A_ : Tuple = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def UpperCAmelCase_ ( self ) -> str:
A_ : Optional[int] = HfArgumentParser(_lowerCamelCase )
A_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=_lowerCamelCase )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=_lowerCamelCase )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_lowerCamelCase )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = parser.parse_args([] )
self.assertEqual(
_lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
A_ : str = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument("""--bar""" , default=_lowerCamelCase , type=_lowerCamelCase , help="""help message""" )
expected.add_argument("""--baz""" , default=_lowerCamelCase , type=_lowerCamelCase )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=_lowerCamelCase )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=_lowerCamelCase )
A_ : Optional[Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_lowerCamelCase )
for dataclass_type in dataclass_types:
A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = parser.parse_args([] )
self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) )
A_ : Tuple = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Any = HfArgumentParser(_lowerCamelCase )
A_ : Tuple = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument("""--required_str""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_lowerCamelCase , )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : str = HfArgumentParser(_lowerCamelCase )
A_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_lowerCamelCase , required=_lowerCamelCase )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_lowerCamelCase , )
expected.add_argument("""--opt""" , type=_lowerCamelCase , default=_lowerCamelCase )
expected.add_argument("""--baz""" , default="""toto""" , type=_lowerCamelCase , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_lowerCamelCase )
self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Dict = HfArgumentParser(_lowerCamelCase )
A_ : Union[str, Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
A_ : Dict = parser.parse_dict(_lowerCamelCase )[0]
A_ : int = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : str = HfArgumentParser(_lowerCamelCase )
A_ : List[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : int = HfArgumentParser(_lowerCamelCase )
A_ : Dict = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Any = os.path.join(_lowerCamelCase , """temp_json""" )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
A_ : Any = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Tuple = HfArgumentParser(_lowerCamelCase )
A_ : List[str] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : List[str] = os.path.join(_lowerCamelCase , """temp_yaml""" )
os.mkdir(_lowerCamelCase )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(_lowerCamelCase , _lowerCamelCase )
A_ : int = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
A_ : str = BasicExample(**_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Tuple = HfArgumentParser(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
| 344 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = (UnCLIPScheduler,)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]:
        config = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def UpperCAmelCase_ ( self ) -> Dict:
for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
def UpperCAmelCase_ ( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
def UpperCAmelCase_ ( self ) -> Optional[int]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
def UpperCAmelCase_ ( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        A_ : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
        scheduler = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCAmelCase_ ( self ) -> Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        A_ : Tuple = self.get_scheduler_config(variance_type="""learned_range""" )
        scheduler = scheduler_class(**A_ )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.171_2790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.799_8052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.001_0011 < 1e-5
def UpperCAmelCase_ ( self ) -> Any:
        scheduler_class = self.scheduler_classes[0]
        A_ : Tuple = self.get_scheduler_config()
        scheduler = scheduler_class(**A_ )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
        assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
        scheduler_class = self.scheduler_classes[0]
        A_ : Dict = self.get_scheduler_config()
        scheduler = scheduler_class(**A_ )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
        assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
pass
| 344 | 1 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class _lowerCAmelCase :
"""simple docstring"""
    def __init__( self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ) -> Optional[Any]:
A_ : Optional[int] = scheduler
A_ : Tuple = optimizers if isinstance(_lowerCamelCase , (list, tuple) ) else [optimizers]
A_ : Any = split_batches
A_ : Optional[int] = step_with_optimizer
A_ : int = GradientState()
    def UpperCAmelCase_ ( self , *args , **kwargs ) -> Union[str, Any]:
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , """total_steps""" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.scheduler.get_last_lr()
def UpperCAmelCase_ ( self ) -> str:
return self.scheduler.state_dict()
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Optional[int]:
self.scheduler.load_state_dict(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
return self.scheduler.get_lr()
    def UpperCAmelCase_ ( self , *args , **kwargs ) -> List[str]:
        return self.scheduler.print_lr(*args , **kwargs )
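# Minimal usage sketch (assumed names, not part of the original file): the wrapper above
# ships in accelerate as `AcceleratedScheduler`; `Accelerator.prepare` normally builds it.
#     wrapped = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
#     wrapped.step()  # no-op while gradients are accumulating, real step(s) once synced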
| 344 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_reduce_labels=False , ) -> Optional[int]:
A_ : Union[str, Any] = size if size is not None else {"""height""": 20, """width""": 20}
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ : Optional[Any] = parent
A_ : Optional[int] = batch_size
A_ : Union[str, Any] = num_channels
A_ : str = image_size
A_ : Tuple = min_resolution
A_ : Dict = max_resolution
A_ : str = do_resize
A_ : Tuple = size
A_ : int = do_center_crop
A_ : Dict = crop_size
A_ : Tuple = do_normalize
A_ : List[str] = image_mean
A_ : Optional[Any] = image_std
A_ : Any = do_reduce_labels
def UpperCAmelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs() -> List[str]:
    """simple docstring"""
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image = Image.open(dataset[0]["""file"""] )
    map = Image.open(dataset[1]["""file"""] )
    return image, map
def prepare_semantic_batch_inputs() -> Optional[int]:
    """simple docstring"""
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    imagea = Image.open(ds[0]["""file"""] )
    mapa = Image.open(ds[1]["""file"""] )
    imageb = Image.open(ds[2]["""file"""] )
    mapb = Image.open(ds[3]["""file"""] )
    return [imagea, imageb], [mapa, mapb]
@require_torch
@require_vision
class _lowerCAmelCase ( __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = BeitImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
        self.image_processor_tester = BeitImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Optional[int]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        self.assertEqual(image_processor.do_reduce_labels , False )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=True )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
        self.assertEqual(image_processor.do_reduce_labels , True )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> str:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
        encoding = image_processing(image_inputs , maps , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
        image , segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image , segmentation_map , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
        images , segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images , segmentation_maps , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def UpperCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image , segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image , segmentation_map , return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 150 )
        image_processing.do_reduce_labels = True
        encoding = image_processing(image , segmentation_map , return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 344 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase__ : int = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''blip_2_vision_model'''
    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.0_0001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **_lowerCamelCase , ) -> str:
super().__init__(**_lowerCamelCase )
A_ : List[str] = hidden_size
A_ : int = intermediate_size
A_ : Tuple = num_hidden_layers
A_ : int = num_attention_heads
A_ : Optional[Any] = patch_size
A_ : Dict = image_size
A_ : Any = initializer_range
A_ : Optional[int] = attention_dropout
A_ : int = layer_norm_eps
A_ : Tuple = hidden_act
A_ : Any = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowerCamelCase )
A_ , A_ : Union[str, Any] = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
A_ : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''blip_2_qformer'''
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **_lowerCamelCase , ) -> int:
        super().__init__(pad_token_id=pad_token_id , **_lowerCamelCase )
A_ : Union[str, Any] = vocab_size
A_ : Optional[Any] = hidden_size
A_ : str = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : Optional[Any] = hidden_act
A_ : Dict = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Tuple = initializer_range
A_ : int = layer_norm_eps
A_ : int = position_embedding_type
A_ : Dict = cross_attention_frequency
A_ : Union[str, Any] = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase , **_lowerCamelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowerCamelCase )
A_ , A_ : int = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
A_ : List[str] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **_lowerCamelCase )
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''blip-2'''
lowerCamelCase = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **_lowerCamelCase ) -> List[str]:
super().__init__(**_lowerCamelCase )
if vision_config is None:
            vision_config = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
            qformer_config = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
            text_config = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
A_ : Union[str, Any] = self.text_config.tie_word_embeddings
A_ : Dict = self.text_config.is_encoder_decoder
A_ : List[Any] = num_query_tokens
A_ : List[Any] = self.vision_config.hidden_size
A_ : List[str] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
A_ : Optional[int] = 1.0
A_ : Union[str, Any] = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ) -> "BlipaConfig":
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
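

# Hedged usage sketch (not in the original file; assumes BlipaVisionConfig is
# defined earlier in this module, as the references above imply): compose a
# BlipaConfig from fresh sub-configs and round-trip it through to_dict().
if __name__ == "__main__":
    vision = BlipaVisionConfig()
    qformer = BlipaQFormerConfig()
    config = BlipaConfig(vision_config=vision.to_dict() , qformer_config=qformer.to_dict() )
    print(config.to_dict()["""model_type"""] )  # "blip-2"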
| 344 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer( tf.keras.layers.Layer ):
    """simple docstring"""

    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ) -> None:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer , *args , **kwargs ):
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config( cls , config ):
        return cls(**config )
    def get_config( self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
    def call( self , x , max_length = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 344 | 1 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( a_ , a_ , a_ = "x" , a_ = 1_0**-1_0 , a_ = 1 , ) -> complex:
"""simple docstring"""
A_ : Tuple = symbols(a_ )
A_ : Optional[Any] = lambdify(a_ , a_ )
A_ : Dict = lambdify(a_ , diff(a_ , a_ ) )
A_ : List[Any] = starting_point
while True:
if diff_function(a_ ) != 0:
A_ : Tuple = prev_guess - multiplicity * func(a_ ) / diff_function(
a_ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
A_ : Tuple = next_guess
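# The update rule implemented above is the multiplicity-aware Newton step:
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# where m = multiplicity; m = 1 recovers the classic Newton-Raphson iteration.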
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f'{newton_raphson("exp(x) - 1", 10, precision=0.0_0_5)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
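# Each optional backend below is probed with try/except; when available, its
# symbols are appended to `_import_structure` so `_LazyModule` can defer the
# heavy imports until first attribute access.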
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 1 |
'''simple docstring'''
def solution( n = 1_0_0 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
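

# Worked check (Project Euler #6; not in the original file): for n = 10 the
# sum of the squares is 385 and the square of the sum is 55**2 = 3025, so the
# difference is 3025 - 385 = 2640.
assert solution(10 ) == 2640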
if __name__ == "__main__":
print(f'{solution() = }')
| 344 |
'''simple docstring'''
class Things:
    """simple docstring"""

    def __init__( self , name , value , weight ) -> None:
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ) -> str:
        return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value( self ):
        return self.value

    def get_name( self ):
        return self.name

    def get_weight( self ):
        return self.weight

    def value_weight( self ):
        return self.value / self.weight
def build_menu( name , value , weight ) -> list:
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( item , max_cost , key_func ) -> tuple:
    """simple docstring"""
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
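    # Hedged usage sketch (the menu below is illustrative, not from the
    # original): greedily fill a 25-unit weight budget, ranking items by value.
    foods = build_menu(["""Burger""", """Pizza""", """Cola"""] , [80, 100, 60] , [40, 10, 10] )
    print(greedy(foods , 25 , Things.get_value ) )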
| 344 | 1 |
'''simple docstring'''
from manim import *
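# Scene overview (descriptive note): draw CPU/GPU/model memory blocks, then
# animate a loaded checkpoint shard streaming its weights (blue squares) into
# free CPU memory slots.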
class _lowerCAmelCase ( Scene ):
    """simple docstring"""

    def construct( self ):
        mem = Rectangle(height=0.5 , width=0.5 )
        fill = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        cpu_left_col_base = [mem.copy() for i in range(6 )]
        cpu_right_col_base = [mem.copy() for i in range(6 )]
        cpu_left_col = VGroup(*cpu_left_col_base ).arrange(UP , buff=0 )
        cpu_right_col = VGroup(*cpu_right_col_base ).arrange(UP , buff=0 )
        cpu_rects = VGroup(cpu_left_col , cpu_right_col ).arrange(RIGHT , buff=0 )
        cpu_text = Text("""CPU""" , font_size=24 )
        cpu = Group(cpu_rects , cpu_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(cpu )
        gpu_base = [mem.copy() for i in range(4 )]
        gpu_rect = VGroup(*gpu_base ).arrange(UP , buff=0 )
        gpu_text = Text("""GPU""" , font_size=24 )
        gpu = Group(gpu_rect , gpu_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        gpu.move_to([-1, -1, 0] )
        self.add(gpu )
        model_base = [mem.copy() for i in range(6 )]
        model_rect = VGroup(*model_base ).arrange(RIGHT , buff=0 )
        model_text = Text("""Model""" , font_size=24 )
        model = Group(model_rect , model_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        model.move_to([3, -1.0, 0] )
        self.add(model )
        cpu_targs = []
        for i, rect in enumerate(model_base ):
            rect.set_stroke(YELLOW )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(YELLOW , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UP )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=UP , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=RIGHT , buff=0.0 )
            self.add(cpu_target )
            cpu_targs.append(cpu_target )
        checkpoint_base = [mem.copy() for i in range(6 )]
        checkpoint_rect = VGroup(*checkpoint_base ).arrange(RIGHT , buff=0 )
        checkpoint_text = Text("""Loaded Checkpoint""" , font_size=24 )
        checkpoint = Group(checkpoint_rect , checkpoint_text ).arrange(DOWN , aligned_edge=DOWN , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        key = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        key_text = MarkupText(
            F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(key_text , key )
        blue_text = MarkupText(
            F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
        blue_text.next_to(key_text , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        step_a = MarkupText(
            F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(step_a ) , Write(blue_text ) )
        self.play(Write(checkpoint_text , run_time=1 ) , Create(checkpoint_rect , run_time=1 ) )
        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base ):
            target = fill.copy().set_fill(BLUE , opacity=0.7 )
            target.move_to(rect )
            first_animations.append(GrowFromCenter(target , run_time=1 ) )
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(cpu_target , run_time=1.5 ) )
        self.play(*first_animations )
        self.play(*second_animations )
        self.wait()
| 344 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance , capacitance ) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
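    # Hedged usage sketch (component values are illustrative, not from the
    # original): a 10 mH inductor with a 5 uF capacitor resonates near 712 Hz,
    # from f = 1 / (2 * pi * sqrt(L * C)).
    print(resonant_frequency(inductance=10e-3 , capacitance=5e-6 ) )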
| 344 | 1 |