"""Compute the prefix function (as used in the Knuth-Morris-Pratt algorithm)."""


def prefix_function(input_string: str) -> list:
    """
    For each index i, return the length of the longest proper prefix of
    input_string[0..i] that is also a suffix of it.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the length of the longest prefix that is also a suffix."""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
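    # A quick sanity check of the functions above (a minimal sketch): "aabc" is
    # both a proper prefix and a suffix of "aabcdaabc", so the answer is 4.
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4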
"""Kandinsky 2.2 image-to-image decoder pipeline."""

from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    """Round height and width up to the next multiple of scale_factor**2, then rescale."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a normalized NCHW float tensor."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2 (decoder stage)."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""Project Euler problem 97: find the last n digits of the non-Mersenne prime
28433 * 2**7830457 + 1 using modular exponentiation."""


def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
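    # Quick check (assumption, flagged: 8739992577 is the widely quoted value of
    # the last ten digits of 28433 * 2**7830457 + 1).
    assert solution(10) == "8739992577"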
"""Lazy-import structure for the ONNX export utilities."""

from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Tests for FlaxAutoModel.

NOTE: the test method names below are descriptive reconstructions; the
original identifiers were mangled in the source.
"""

import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow

if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
"""BLEU regression test for the FSMT (FairSeq WMT19) translation models."""

import io
import json
import unittest

from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu

filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
"""Second-order IIR (biquad) filter designs based on the Audio EQ Cookbook."""

from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking EQ filter with the given gain in decibels."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf filter with the given gain in decibels."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf filter with the given gain in decibels."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
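

if __name__ == "__main__":
    # A minimal usage sketch of the designs above. It assumes the accompanying
    # ``IIRFilter`` exposes a per-sample ``process(sample)`` method (as in the
    # audio_filters package this module imports from).
    lowpass = make_lowpass(1_000, 48_000)
    impulse = [1.0] + [0.0] * 7
    # Print the start of the low-pass biquad's impulse response, one sample at a time.
    print([round(lowpass.process(sample), 6) for sample in impulse])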
"""Lazy-import structure for the (deprecated) Trajectory Transformer model."""

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Modular multiplicative inverse via the extended Euclidean algorithm."""


def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return x such that (a * x) % m == 1, if it exists."""
    if gcd := greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
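

if __name__ == "__main__":
    # A small usage sketch: 3 * 4 == 12 ≡ 1 (mod 11), so the inverse of 3 is 4.
    assert greatest_common_divisor(121, 11) == 11
    assert mod_inverse(3, 11) == 4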
"""Manacher's algorithm: find the longest palindromic substring in linear time."""


def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring of ``input_string``."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
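    # Illustrative check: the whole of "abababa" is a palindrome, so it is
    # trivially the longest palindromic substring of itself.
    assert palindromic_string("abababa") == "abababa"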
"""Tests for the TensorFlow XGLM model.

NOTE: method and attribute names below are reconstructions; the originals were
mangled in the source (the tester class name is confirmed by its own call site).
"""

from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
"""A read-only fsspec filesystem over the files of a Hub dataset repository."""

from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hub dataset repository, exposed as a (read-only) filesystem."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
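
# A minimal usage sketch (hedged: a real ``DatasetInfo`` would come from
# ``huggingface_hub.HfApi().dataset_info(...)``; the file name below is
# illustrative):
#
#   fs = HfFileSystem(repo_info=dataset_info, token=None)
#   print(fs.ls(""))                      # top-level files and directories
#   with fs.open("data/train.csv") as f:  # hypothetical repo file
#       first_line = f.readline()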
"""ALBERT model configuration, plus its ONNX export configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig

ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
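
# A minimal usage sketch of the two classes above (``inputs`` is the property
# defined in this file; the ``task`` argument follows the OnnxConfig API):
#
#   config = AlbertConfig()
#   onnx_config = AlbertOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes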
"""Convert ViLT checkpoints from the original repository to the HuggingFace format."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into our ViLT structure."""
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True

        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True

        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3

        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
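
# Example invocation (a sketch; the script file name is hypothetical, and the
# default --checkpoint_url above is used when the flag is omitted):
#
#   python convert_vilt_original_to_pytorch_checkpoint.py \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm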
"""Image processor with shortest-edge resize, center crop, rescale and normalize steps."""

from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


# NOTE: the class name below is an inference from the LeViT-style defaults
# (256/224 shortest-edge scaling, 224x224 crop); the original identifier was mangled.
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
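
# A minimal usage sketch (the image path is illustrative; calling the processor
# dispatches to ``preprocess`` via the BaseImageProcessor ``__call__``):
#
#   from PIL import Image
#   processor = LevitImageProcessor()
#   batch = processor(Image.open("cat.png"), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)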
"""Cocktail shaker sort: a bidirectional bubble sort."""


def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by alternating forward and backward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: sink the smallest remaining element toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: float the largest remaining element toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
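    # Illustrative check of the bidirectional passes:
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]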
| 7 | 0 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
A: Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
A: List[Any] = json.load(f)
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
    def get_tokenizer( self , mname ) -> FSMTTokenizer:
        return FSMTTokenizer.from_pretrained(mname)

    def get_model( self , mname ) -> FSMTForConditionalGeneration:
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def lowerCamelCase__ ( self , pair , min_bleu ) -> None:
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu)
| 711 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=DummyObject ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ['transformers', 'torch', 'note_seq']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCamelCase__ ( cls , *_lowercase , **_lowercase ) -> Dict:
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 7 | 0 |
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _UpperCAmelCase ( input_text , convert_value=None , default=None , error_message=None ) -> str:
    """simple docstring"""
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _UpperCAmelCase ( input_text , options=[] , convert_value=None , default_choice=0 ) -> Any:
    """simple docstring"""
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _UpperCAmelCase ( value ) -> ComputeEnvironment:
    """simple docstring"""
    value = int(value)
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])


def _UpperCAmelCase ( value ) -> DistributedType:
    """simple docstring"""
    value = int(value)
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value])


def _UpperCAmelCase ( value ) -> str:
    """simple docstring"""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _UpperCAmelCase ( value ) -> PrecisionType:
    """simple docstring"""
    value = int(value)
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])


def _UpperCAmelCase ( value ) -> SageMakerDistributedType:
    """simple docstring"""
    value = int(value)
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])


def _UpperCAmelCase ( value : str ) -> bool:
    """simple docstring"""
    return {"yes": True, "no": False}[value.lower()]
class __magic_name__ ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
    def _format_usage( self , usage , actions , groups , prefix ) -> str:
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('<command> [<args>] ', '')
        return usage
| 712 |
'''simple docstring'''
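# Jaro similarity: jaro = (m/|s1| + m/|s2| + (m - t)/m) / 3, where m is the
# match count and t the transposition count; in this implementation two
# characters match when they sit within min(|s1|, |s2|) // 2 positions of
# each other. Jaro-Winkler then boosts the score by the common prefix:
# jw = jaro + 0.1 * l * (1 - jaro), with l the shared prefix length (max 4).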
def jaro_winkler( stra : str , strb : str ) -> float:
    """simple docstring"""

    def get_matched_characters( _stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 7 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __magic_name__ ( unittest.TestCase, ToolTesterMixin ):
    """simple docstring"""

    def setUp( self ) -> None:
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def lowerCamelCase__ ( self ) -> Optional[Any]:
        result = self.tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def lowerCamelCase__ ( self ) -> Any:
        result = self.remote_tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def lowerCamelCase__ ( self ) -> Optional[int]:
        result = self.tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def lowerCamelCase__ ( self ) -> Optional[int]:
        result = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
| 713 |
'''simple docstring'''
from __future__ import annotations
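# Square-matrix rotations reduce to two primitives: a 90-degree
# counterclockwise rotation is a transpose followed by reversing the row
# order, a 180-degree rotation reverses both rows and columns, and a
# 270-degree counterclockwise rotation is a transpose followed by reversing
# each column.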
def make_matrix( row_size : int = 4 ) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix


def reverse_column( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix( matrix : list[list[int]] ) -> None:
    """simple docstring"""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 7 | 0 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
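# The first helper records the current git repo, commit sha, and branch into
# git_log.json inside the given folder so training runs stay reproducible;
# the second initializes single- or multi-GPU distributed state on a params
# namespace from the usual torch.distributed environment variables.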
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(a, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def _UpperCAmelCase ( params ) -> None:
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( args ) -> None:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed ) | 714 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _UpperCAmelCase ( model_card_dir , src_lang , tgt_lang ) -> None:
"""simple docstring"""
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
lowercase_ : Optional[Any] = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"Generating {path}")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(lowercase_)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    _UpperCAmelCase(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7 | 0 |
def price_plus_tax( price : float , tax_rate : float ) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(1_0_0, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 715 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
A: Tuple = logging.getLogger(__name__)
def _UpperCAmelCase ( a : str ) -> List[Any]:
"""simple docstring"""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }
    with open(os.path.join(a, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def _UpperCAmelCase ( params ) -> None:
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def _UpperCAmelCase ( args ) -> None:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 7 | 0 |
'''simple docstring'''
import os
import numpy
import onnx
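# Deduplicates identical initializer tensors in an ONNX model: two
# TensorProtos are compared with their names temporarily blanked out, every
# node input that references a duplicate is rewired to the surviving copy
# (recursing into If/Loop subgraphs), and the saved bytes are estimated from
# each duplicate's dtype and shape.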
def _is_equal_tensor_proto( a , b ) -> bool:
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with( node_proto , name , new_name ) -> None:
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with( graph_proto , name , new_name ) -> None:
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ) -> None:
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def _UpperCAmelCase ( onnx_file_path ) -> str:
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
| 716 |
'''simple docstring'''
import os
from distutils.util import strtobool
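# Helpers for reading typed configuration values from environment variables:
# the first checks a list of keys and returns the first non-negative int it
# finds, the second parses a boolean flag via strtobool (which returns 1/0),
# and the third returns a raw string choice with a fallback default.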
def _UpperCAmelCase ( env_keys , default ) -> int:
    """simple docstring"""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def _UpperCAmelCase ( key , default=False ) -> bool:
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def _UpperCAmelCase ( key , default="no" ) -> str:
    """simple docstring"""
    value = os.environ.get(key, str(default))
    return value
| 7 | 0 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
| 717 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
A: int = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
A: List[str] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
A: Union[str, Any] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
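# Per-class IoU is computed from histograms over label ids: for each image,
# area_intersect counts pixels where prediction and ground truth agree on a
# class, area_union = area_pred + area_label - area_intersect, and the final
# IoU is the ratio of the two accumulated over the whole dataset.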
def intersect_and_union( pred_label , label , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    """simple docstring"""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou)
    metrics['mean_accuracy'] = np.nanmean(acc)
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ) -> Dict:
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
        return iou_result
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
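# Standard transformers lazy-import pattern: _import_structure maps submodule
# names to their exported symbols, and _LazyModule defers the actual imports
# until an attribute is first accessed, so optional dependencies such as
# sentencepiece are only required when the tokenizer is really used.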
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Dict = logging.get_logger(__name__)
A: Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'vit'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = version.parse('1.11' )
@property
def lowerCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase__ ( self ) -> float:
return 1E-4
| 7 | 0 |
import math
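# For an AC load, real power P = S * pf and reactive power
# Q = S * sin(phi) = S * sqrt(1 - pf**2), where S is the apparent power and
# pf = cos(phi) is the power factor, so pf must lie in [-1, 1].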
def real_power( apparent_power : float , power_factor : float ) -> float:
    """simple docstring"""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor


def reactive_power( apparent_power : float , power_factor : float ) -> float:
    """simple docstring"""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: int = logging.get_logger(__name__)
A: int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 'gpt_bigcode'
SCREAMING_SNAKE_CASE_ : int = ['past_key_values']
SCREAMING_SNAKE_CASE_ : Any = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=50257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester :
"""simple docstring"""
def __init__( self , _lowercase , ) -> int:
lowercase_ : Dict = parent
lowercase_ : Dict = 13
lowercase_ : Optional[Any] = 7
lowercase_ : Union[str, Any] = True
lowercase_ : str = True
lowercase_ : Optional[Any] = True
lowercase_ : List[Any] = 99
lowercase_ : Union[str, Any] = 32
lowercase_ : str = 2
lowercase_ : Optional[int] = 4
lowercase_ : Optional[int] = 37
lowercase_ : Any = 'gelu'
lowercase_ : Tuple = 0.1
lowercase_ : List[Any] = 0.1
lowercase_ : int = 512
lowercase_ : Tuple = 16
lowercase_ : Any = 2
lowercase_ : Tuple = 0.02
lowercase_ : Union[str, Any] = 3
lowercase_ : int = 4
lowercase_ : Optional[Any] = None
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Tuple = None
if self.use_input_mask:
lowercase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = None
if self.use_labels:
lowercase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : int = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : str = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self ) -> Union[str, Any]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
lowercase_ : int = TFEsmModel(config=_lowercase )
lowercase_ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowercase_ : Tuple = model(_lowercase )
lowercase_ : Dict = [input_ids, input_mask]
lowercase_ : List[Any] = model(_lowercase )
lowercase_ : Tuple = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
lowercase_ : Union[str, Any] = True
lowercase_ : Tuple = TFEsmModel(config=_lowercase )
lowercase_ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowercase_ : Optional[int] = model(_lowercase )
lowercase_ : str = [input_ids, input_mask]
lowercase_ : Union[str, Any] = model(_lowercase , encoder_hidden_states=_lowercase )
# Also check the case where encoder outputs are not passed
lowercase_ : List[Any] = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
lowercase_ : Optional[Any] = TFEsmForMaskedLM(config=_lowercase )
lowercase_ : List[Any] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Dict = TFEsmForTokenClassification(config=_lowercase )
lowercase_ : str = {'input_ids': input_ids, 'attention_mask': input_mask}
lowercase_ : int = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __magic_name__ ( UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
    def setUp( self ) -> None:
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def lowerCamelCase__ ( self ) -> str:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowerCamelCase__ ( self ) -> Tuple:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowercase )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def lowerCamelCase__ ( self ) -> Optional[int]:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = TFEsmModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@unittest.skip('Protein models do not support embedding resizing.' )
def lowerCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def lowerCamelCase__ ( self ) -> Tuple:
pass
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(_lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowercase_ : Any = model.get_bias()
assert isinstance(_lowercase , _lowercase )
for k, v in name.items():
assert isinstance(_lowercase , tf.Variable )
else:
lowercase_ : Union[str, Any] = model.get_output_embeddings()
assert x is None
lowercase_ : Tuple = model.get_bias()
assert name is None
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : Optional[int] = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowercase_ : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase_ : Tuple = model(_lowercase )[0]
lowercase_ : int = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _lowercase )
# compare the actual values for a slice.
lowercase_ : Union[str, Any] = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : Dict = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowercase_ : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase_ : str = model(_lowercase )[0]
# compare the actual values for a slice.
lowercase_ : Optional[int] = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 720 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : Tuple = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Dict:
lowercase_ : Tuple = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> str:
lowercase_ : int = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Any:
lowercase_ : Dict = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[Any]:
lowercase_ : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowercase ) )
def lowerCamelCase__ ( self ) -> Optional[int]:
lowercase_ : str = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
# pass variant but use the non-variant filenames
lowercase_ : Optional[int] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> Union[str, Any]:
lowercase_ : int = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> int:
lowercase_ : str = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
lowercase_ : str = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
# pass variant but use the non-variant filenames
lowercase_ : List[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertTrue(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
def lowerCamelCase__ ( self ) -> List[str]:
lowercase_ : Union[str, Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowercase_ : Dict = 'fp16'
self.assertFalse(is_safetensors_compatible(_lowercase , variant=_lowercase ) )
| 7 | 0 |
'''simple docstring'''
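# Binary-to-octal conversion: the bit string is zero-padded on the left to a
# multiple of three, then each 3-bit group maps to exactly one octal digit,
# e.g. "101" -> 5 and "11010" -> "011" + "010" -> "32".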
def _UpperCAmelCase ( bin_string : str ) -> str:
    """simple docstring"""
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 721 |
'''simple docstring'''
import argparse
A: List[Any] = "docs/source/_static/js/custom.js"
def update_custom_js( version ) -> None:
    """simple docstring"""
    with open(A, encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion ='):
        index += 1
    lines[index] = f"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {'):
        index += 1
    # We go until the end
    while not lines[index].startswith('}'):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f"    \"v{version}\": \"v{version}\",\n"
    with open(A, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version)
| 7 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : Optional[int] , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size_divisor=32 , do_rescale=True , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = GLPNImageProcessor if is_vision_available() else None
def A__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] =GLPNImageProcessingTester(self )
@property
def A__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : List[Any] ) -> Any:
'''simple docstring'''
lowercase : Any =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''size_divisor''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''resample''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_rescale''' ) )
def A__ ( self : int ) -> str:
'''simple docstring'''
pass
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase : List[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowercase : int =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase : List[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def A__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase : Dict =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
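# Note (added): all three call tests assert the same invariant -- the GLPN
# processor resizes inputs so both spatial dimensions are exact multiples of
# `size_divisor`, which is what the `% size_divisor == 0` checks verify.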
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
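# Note (added): the _LazyModule swap above keeps importing this package cheap;
# submodules such as modeling_m2m_100 (and therefore torch) are only imported
# when one of their attributes is first accessed.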
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
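# Note (added): this class defines no test methods of its own -- the shared
# checks come from FlaxModelTesterMixin, which consumes `dummy_input` and
# `prepare_init_args_and_inputs_for_common` defined above.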
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class LiltConfig(PretrainedConfig):
    """Configuration for LiLT: BERT-style hyper-parameters plus the layout-specific ones."""

    model_type = 'lilt'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
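# Minimal usage sketch (added; assumes the public transformers API):
#
#   from transformers import LiltConfig, LiltModel
#   config = LiltConfig()        # defaults above
#   model = LiltModel(config)    # randomly initialised LiLT encoder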
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) tokenizer for HerBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        sep_token='</s>',
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
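# Note (added): the helpers above encode single sequences as "<s> A </s>" and
# pairs as "<s> A </s> B </s>", with token type ids 0 for the first segment
# and 1 for the second, which is the layout the masks and ids assume.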
'''simple docstring'''
def count_inversions_bf(arr):
    """Count inversions by brute force: check every pair (i, j) with i < j."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer.

    Returns the sorted array together with the number of inversions.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)


if __name__ == "__main__":
    main()
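# Complexity note (added): count_inversions_bf compares all pairs, O(n^2);
# count_inversions_recursive follows the merge-sort recurrence
# T(n) = 2 T(n / 2) + O(n), giving O(n log n).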
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    """Check that TRANSFORMERS_OFFLINE=1 works once files are in the local cache."""
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : Tuple =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Optional[Any] ='''1'''
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : str =self.get_env()
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Any ='''1'''
lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] ='''
from transformers import pipeline
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase : Tuple =self.get_env()
lowercase : Optional[int] ='''1'''
lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] ='''
from transformers import AutoModel
'''
lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : List[str] ='''1'''
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
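# Note (added): each test above assembles a small child-process script from the
# `load`, `run` and `mock` fragments; the mock monkey-patches socket.socket so
# any network access raises, proving the cached files alone are sufficient.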
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """The output of UNet1DModel: the (possibly denoised) sample tensor."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that maps a noisy sample and a timestep to a sample-shaped output."""
@register_to_config
def __init__( self : List[str] , UpperCAmelCase : int = 6_5536 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 0 , UpperCAmelCase : str = "fourier" , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase : str = None , UpperCAmelCase : Tuple[int] = (32, 32, 64) , UpperCAmelCase : str = None , UpperCAmelCase : int = 8 , UpperCAmelCase : int = 1 , UpperCAmelCase : bool = False , ) -> Tuple:
'''simple docstring'''
super().__init__()
lowercase : List[str] =sample_size
# time
if time_embedding_type == "fourier":
lowercase : List[str] =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase , log=UpperCAmelCase , flip_sin_to_cos=UpperCAmelCase )
lowercase : Union[str, Any] =2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowercase : Optional[int] =Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase , downscale_freq_shift=UpperCAmelCase )
lowercase : List[str] =block_out_channels[0]
if use_timestep_embedding:
lowercase : Optional[int] =block_out_channels[0] * 4
lowercase : Optional[int] =TimestepEmbedding(
in_channels=UpperCAmelCase , time_embed_dim=UpperCAmelCase , act_fn=UpperCAmelCase , out_dim=block_out_channels[0] , )
lowercase : Dict =nn.ModuleList([] )
lowercase : Any =None
lowercase : Tuple =nn.ModuleList([] )
lowercase : int =None
# down
lowercase : Union[str, Any] =in_channels
for i, down_block_type in enumerate(UpperCAmelCase ):
lowercase : Tuple =output_channel
lowercase : Optional[Any] =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowercase : Union[str, Any] =i == len(UpperCAmelCase ) - 1
lowercase : Union[str, Any] =get_down_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase )
# mid
lowercase : List[Any] =get_mid_block(
UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase , add_downsample=UpperCAmelCase , )
# up
lowercase : List[Any] =list(reversed(UpperCAmelCase ) )
lowercase : Tuple =reversed_block_out_channels[0]
if out_block_type is None:
lowercase : str =out_channels
else:
lowercase : Any =block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase ):
lowercase : Dict =output_channel
lowercase : Optional[int] =(
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase ) - 1 else final_upsample_channels
)
lowercase : Dict =i == len(UpperCAmelCase ) - 1
lowercase : Union[str, Any] =get_up_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase )
lowercase : List[str] =output_channel
# out
lowercase : List[Any] =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowercase : Optional[Any] =get_out_block(
out_block_type=UpperCAmelCase , num_groups_out=UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase , act_fn=UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        """Run the UNet; returns a UNet1DOutput, or a plain tuple if return_dict=False."""
lowercase : str =timestep
if not torch.is_tensor(UpperCAmelCase ):
lowercase : Optional[Any] =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowercase : int =timesteps[None].to(sample.device )
lowercase : int =self.time_proj(UpperCAmelCase )
if self.config.use_timestep_embedding:
lowercase : str =self.time_mlp(UpperCAmelCase )
else:
lowercase : Any =timestep_embed[..., None]
lowercase : Union[str, Any] =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowercase : str =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowercase : Tuple =()
for downsample_block in self.down_blocks:
lowercase , lowercase : str =downsample_block(hidden_states=UpperCAmelCase , temb=UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowercase : Union[str, Any] =self.mid_block(UpperCAmelCase , UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowercase : str =down_block_res_samples[-1:]
lowercase : Tuple =down_block_res_samples[:-1]
lowercase : Any =upsample_block(UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , temb=UpperCAmelCase )
# 5. post-process
if self.out_block:
lowercase : str =self.out_block(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (sample,)
        return UNet1DOutput(sample=sample)
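# Note (added): the forward pass above is the classic UNet wiring -- each down
# block pushes its residuals onto `down_block_res_samples`, and each up block
# pops the matching residuals back off in reverse order.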
'''simple docstring'''
__author__ = 'Alexander Joslin'

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised integer expression with two stacks."""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits, as integers."""
    if bit_count < 0:
        raise ValueError('The given input must be positive')

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Build the Gray code sequence for `bit_count` bits, as binary strings."""
    if bit_count == 0:
        return ['0']
    if bit_count == 1:
        return ['0', '1']

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n == 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = '0' + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = '1' + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
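# Example (added): gray_code(2) -> [0, 1, 3, 2], i.e. '00', '01', '11', '10' --
# consecutive codes differ in exactly one bit, which is the defining property.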
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if `phone` looks like a valid Indian mobile number."""
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
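# Hand-checked examples (added):
#   indian_phone_validator('+918827897895')  -> True
#   indian_phone_validator('9876543210')     -> True   (10 digits, starts with 9)
#   indian_phone_validator('1234567890')     -> False  (must start with 7, 8 or 9)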
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants')

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token', 'embeddings.mask_token')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm', 'embeddings.norm')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')

    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'

    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self'
            if "weight" in key:
                orig_state_dict[f'{prefix}.query.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.query.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)

        print(f'Saving image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and image processor for {model_name} to hub')
        model.push_to_hub(f'microsoft/{model_name}')
        image_processor.push_to_hub(f'microsoft/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
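# Note (added): the qkv handling in convert_state_dict exists because the
# original SimMIM checkpoint stores query/key/value as one fused matrix, while
# the HF Swin implementation expects three separate projections per block.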
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """Builds tiny RoFormer configs and inputs shared by the test classes below."""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shared model tests for TFRoFormer."""

    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the pretrained Chinese RoFormer."""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Checks the sinusoidal position embeddings against hand-computed values."""

    tolerance = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
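        # Note (added): each expected row interleaves sin/cos position
        # encodings: the first half is sin(pos / 10000^(2i/d)) and the second
        # half the matching cos, e.g. 0.8415 = sin(1) and 0.5403 = cos(1).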
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks how rotary position embeddings are applied inside self-attention."""

    tolerance = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    """Tests for the REALM block-records retriever."""
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : Tuple =tempfile.mkdtemp()
lowercase : str =5
# Realm tok
lowercase : Optional[int] =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase : Optional[int] =os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
lowercase : Optional[Any] =os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase : Dict =os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
def A__ ( self : Any ) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =RealmConfig(num_block_records=self.num_block_records )
return config
def A__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def A__ ( self : Tuple ) -> str:
'''simple docstring'''
lowercase : Optional[Any] =np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=UpperCAmelCase , )
return block_records
def A__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def A__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] =self.get_config()
lowercase : Tuple =self.get_dummy_retriever()
lowercase : int =retriever.tokenizer
lowercase : Any =np.array([0, 3] , dtype='''long''' )
lowercase : Optional[int] =tokenizer(['''Test question'''] ).input_ids
lowercase : Optional[int] =tokenizer(
['''the fourth'''] , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ).input_ids
lowercase : List[Any] =config.reader_seq_len
lowercase , lowercase , lowercase , lowercase : Dict =retriever(
UpperCAmelCase , UpperCAmelCase , answer_ids=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors='''np''' )
self.assertEqual(len(UpperCAmelCase ) , 2 )
self.assertEqual(len(UpperCAmelCase ) , 2 )
self.assertEqual(len(UpperCAmelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def A__ ( self : Tuple ) -> int:
'''simple docstring'''
lowercase : List[str] =self.get_config()
lowercase : Optional[Any] =self.get_dummy_retriever()
lowercase : int =retriever.tokenizer
lowercase : Optional[Any] =np.array([0, 3, 5] , dtype='''long''' )
lowercase : Union[str, Any] =tokenizer(['''Test question'''] ).input_ids
lowercase : Tuple =tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ).input_ids
lowercase : Tuple =config.reader_seq_len
lowercase , lowercase , lowercase , lowercase : int =retriever(
UpperCAmelCase , UpperCAmelCase , answer_ids=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors='''np''' )
self.assertEqual([False, True, True] , UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] =self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
lowercase : List[Any] =retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
lowercase : Dict =os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
lowercase : Dict =RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
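# Note (added): `block_records` is just a numpy array of byte strings; the
# retriever looks up the blocks addressed by the given indices, concatenates
# each with the question, and marks which candidates contain the answer span.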
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into one processor."""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.'
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.'
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop('pixel_values')
if return_overflowing_tokens is True:
lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase : Dict =images
return encoded_inputs
def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
'''simple docstring'''
lowercase : str =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
return images_with_overflow
def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
return self.image_processor_class
@property
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
return self.image_processor
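# Hedged usage sketch, not part of the original file: the checkpoint name and
# keyword choices below are assumptions based on the transformers docs.
#
#     from transformers import LayoutXLMProcessor
#     from PIL import Image
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
#     sorted(encoding.keys())  # ["attention_mask", "bbox", "image", "input_ids"]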
| 8 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
SCREAMING_SNAKE_CASE = data_utils.TransfoXLTokenizer
SCREAMING_SNAKE_CASE = data_utils.TransfoXLCorpus
SCREAMING_SNAKE_CASE = data_utils
SCREAMING_SNAKE_CASE = data_utils
def lowercase_ ( __A : Dict , __A : Any , __A : Optional[int] , __A : List[str] ) -> Tuple:
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__A , '''rb''' ) as fp:
lowercase : List[str] =pickle.load(__A , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
lowercase : Optional[Any] =pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
lowercase : Dict =corpus.vocab.__dict__
torch.save(__A , __A )
lowercase : Optional[int] =corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , __A )
lowercase : Dict =pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(__A , __A )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
lowercase : Dict =os.path.abspath(__A )
lowercase : str =os.path.abspath(__A )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
lowercase : Any =TransfoXLConfig()
else:
lowercase : str =TransfoXLConfig.from_json_file(__A )
print(F'Building PyTorch model from configuration: {config}' )
lowercase : List[str] =TransfoXLLMHeadModel(__A )
lowercase : Dict =load_tf_weights_in_transfo_xl(__A , __A , __A )
# Save pytorch-model
lowercase : int =os.path.join(__A , __A )
lowercase : Union[str, Any] =os.path.join(__A , __A )
print(F'Save PyTorch model to {os.path.abspath(__A )}' )
torch.save(model.state_dict() , __A )
print(F'Save configuration file to {os.path.abspath(__A )}' )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained Transformer-XL model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
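# Hedged CLI sketch (all paths below are placeholders, not from the source):
#
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl-pytorch \
#         --tf_checkpoint_path ./model.ckpt \
#         --transfo_xl_config_file ./config.json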
| 8 |
'''simple docstring'''
def lowercase_ ( __A : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
"""simple docstring"""
try:
lowercase : Any =int(__A )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =1
lowercase : Dict =2
while i * i <= n:
while n % i == 0:
lowercase : Optional[int] =i
n //= i
i += 1
if n > 1:
lowercase : Dict =n
return int(__A )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 | 1 |
'''simple docstring'''
import math
import sys
def lowercase_ ( __A : int ) -> int:
"""simple docstring"""
if number != int(__A ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
lowercase : List[str] =[-1] * (number + 1)
lowercase : Dict =0
for i in range(1 , number + 1 ):
lowercase : Union[str, Any] =sys.maxsize
lowercase : List[Any] =int(math.sqrt(__A ) )
for j in range(1 , root + 1 ):
lowercase : Optional[Any] =1 + answers[i - (j**2)]
lowercase : List[str] =min(__A , __A )
lowercase : Dict =answer
return answers[number]
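# A minimal standalone sketch of the same DP for quick verification; the helper
# name is an assumption, not part of the source (math.isqrt needs Python 3.8+).
def _min_squares_check(number: int) -> int:
    # answers[i] holds the fewest perfect squares summing to i
    answers = [0] * (number + 1)
    for i in range(1, number + 1):
        answers[i] = 1 + min(answers[i - j * j] for j in range(1, math.isqrt(i) + 1))
    return answers[number]

assert _min_squares_check(12) == 3  # 4 + 4 + 4
assert _min_squares_check(13) == 2  # 4 + 9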
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
lowercase : str =u
for i in range(1 , __A ):
lowercase : Any =temp * (u - i)
return temp
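# The helper above builds the falling product u*(u-1)*...*(u-k+1), the weight of
# the k-th forward difference in Newton's forward formula with u = (x - x0)/h:
#   f(x) ~ y0 + u*dy0 + u(u-1)/2! * d2y0 + u(u-1)(u-2)/3! * d3y0 + ...
# e.g. for u = 0.5 the second-order product is 0.5 * (0.5 - 1) = -0.25.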
def lowercase_ ( ) -> None:
"""simple docstring"""
lowercase : List[str] =int(input('''enter the numbers of values: ''' ) )
lowercase : list[list[float]] =[]
for _ in range(__A ):
y.append([] )
for i in range(__A ):
for j in range(__A ):
y[i].append(__A )
lowercase : List[Any] =0
print('''enter the values of parameters in a list: ''' )
lowercase : Optional[int] =list(map(__A , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(__A ):
lowercase : str =float(input() )
lowercase : int =int(input('''enter the value to interpolate: ''' ) )
lowercase : Union[str, Any] =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __A ):
for j in range(n - i ):
lowercase : str =y[j + 1][i - 1] - y[j][i - 1]
lowercase : Any =y[0][0]
for i in range(1 , __A ):
summ += (ucal(__A , __A ) * y[0][i]) / math.factorial(__A )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 8 | 1 |
'''simple docstring'''
import requests
SCREAMING_SNAKE_CASE = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def lowercase_ ( __A : str ) -> None:
"""simple docstring"""
lowercase : Tuple =requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F'{i}.) {article["title"]}' )
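# Hedged sketch of the v1 News API payload consumed above (shape assumed):
# {"articles": [{"title": "...", "description": "...", "url": "..."}, ...]}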
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 8 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
# Create a dummy config file with image_processor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
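# Hedged sketch of the registration pattern exercised by the tests above; the
# custom classes are placeholders defined elsewhere.
#
#     from transformers import AutoConfig, AutoImageProcessor
#
#     AutoConfig.register("custom", CustomConfig)  # map model_type -> config
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     processor = AutoImageProcessor.from_pretrained("./dir-with-custom-config")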
| 8 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str]=7 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Optional[Any]=18 , UpperCAmelCase : Optional[int]=30 , UpperCAmelCase : str=400 , UpperCAmelCase : str=True , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : List[Any]=True , ) -> List[str]:
'''simple docstring'''
lowercase : Dict =size if size is not None else {'''height''': 18, '''width''': 18}
lowercase : Any =parent
lowercase : Optional[Any] =batch_size
lowercase : List[Any] =num_channels
lowercase : str =image_size
lowercase : Optional[Any] =min_resolution
lowercase : Optional[Any] =max_resolution
lowercase : Any =do_resize
lowercase : Optional[int] =size
lowercase : Tuple =apply_ocr
def A__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : Any =LayoutLMvaImageProcessingTester(self )
@property
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
lowercase : Any =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''apply_ocr''' ) )
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
lowercase : Any =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def A__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowercase : Optional[int] =image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase )
self.assertIsInstance(encoding.boxes , UpperCAmelCase )
# Test batched
lowercase : Optional[Any] =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def A__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
lowercase : str =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase : Any =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def A__ ( self : Tuple ) -> int:
'''simple docstring'''
lowercase : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
lowercase : Dict =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
lowercase : str =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase : int =LayoutLMvaImageProcessor()
from datasets import load_dataset
lowercase : List[str] =load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
lowercase : str =Image.open(ds[0]['''file'''] ).convert('''RGB''' )
lowercase : Union[str, Any] =image_processing(UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase : Optional[Any] =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
lowercase : Any =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase )
self.assertListEqual(encoding.boxes , UpperCAmelCase )
# with apply_OCR = False
lowercase : Tuple =LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase )
lowercase : str =image_processing(UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 8 |
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE = 8.988E9 # units = N * m^s * C^-2
def lowercase_ ( __A : float , __A : float , __A : float , __A : float ) -> dict[str, float]:
"""simple docstring"""
lowercase : Dict =abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
lowercase : Union[str, Any] =COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
lowercase : int =abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
lowercase : int =abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
lowercase : Tuple =(COULOMBS_CONSTANT * charge_product / abs(__A )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
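# Worked example (assumed values): two 1 C charges held 1 m apart experience
# F = 8.988e9 * |1 * 1| / 1**2 ~ 8.988e9 N, so passing force=0 with those
# inputs yields {"force": 8.988e9}.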
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(UpperCAmelCase ):
lowercase : List[str] =AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowercase : Dict =FlaxAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(UpperCAmelCase ):
lowercase : int =AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowercase : Optional[int] =FlaxAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def A__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase : List[str] =AutoTokenizer.from_pretrained(UpperCAmelCase )
lowercase : List[str] =FlaxBertModel.from_pretrained(UpperCAmelCase )
lowercase : Dict =tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase : Optional[int] ):
return model(**UpperCAmelCase )
eval(**UpperCAmelCase ).block_until_ready()
@slow
def A__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase : Optional[Any] =AutoTokenizer.from_pretrained(UpperCAmelCase )
lowercase : Tuple =FlaxRobertaModel.from_pretrained(UpperCAmelCase )
lowercase : Tuple =tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase : Optional[int] ):
return model(**UpperCAmelCase )
eval(**UpperCAmelCase ).block_until_ready()
def A__ ( self : str ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowercase : Optional[int] =FlaxAutoModel.from_pretrained('''bert-base''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Optional[int] =FlaxAutoModel.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
lowercase : Union[str, Any] =FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(UpperCAmelCase , '''Use `from_pt=True` to load this model''' ):
lowercase : str =FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 8 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ) -> Any:
"""simple docstring"""
lowercase : int =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowercase : Union[str, Any] =bs[:]
lowercase : Tuple =0
for b in range(2**8 ):
if b not in bs:
bs.append(__A )
cs.append(2**8 + n )
n += 1
lowercase : Optional[Any] =[chr(__A ) for n in cs]
return dict(zip(__A , __A ) )
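# The table above maps every byte value 0-255 to a printable unicode character
# so byte-level BPE can round-trip arbitrary text: printable bytes map to
# themselves (65 -> "A"), while the rest shift into chr(256 + n) (0 -> "\u0100").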
def lowercase_ ( __A : str ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] =set()
lowercase : Tuple =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase : List[str] =char
return pairs
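# e.g. for the symbol tuple ("h", "e", "l", "l", "o") the helper above returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.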
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Optional[int] =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
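# Worked padding example (assumed): with padding_side == "right" and two padded
# positions, a global_attention_mask of [1, 0, 0] becomes [1, 0, 0, -1, -1];
# -1 marks padding because 0 already means "local attention" for LED.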
| 8 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : List[Any] , __A : int , __A : int ) -> Optional[int]:
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
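# Worked example (assumed): a box (10, 20, 30, 40) on a 100x200 image maps to
# (100, 100, 300, 200) on the 0-1000 grid that LayoutLM-style models expect.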
def lowercase_ ( __A : np.ndarray , __A : Optional[str] , __A : Optional[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase : int =to_pil_image(__A )
lowercase , lowercase : Tuple =pil_image.size
lowercase : Optional[Any] =pytesseract.image_to_data(__A , lang=__A , output_type='''dict''' , config=__A )
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase : Dict =[idx for idx, word in enumerate(__A ) if not word.strip()]
lowercase : str =[word for idx, word in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : Optional[int] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : List[Any] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : str =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : int =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase : Tuple =[]
for x, y, w, h in zip(__A , __A , __A , __A ):
lowercase : str =[x, y, x + w, y + h]
actual_boxes.append(__A )
# finally, normalize the bounding boxes
lowercase : List[str] =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(__A , __A , __A ) )
assert len(__A ) == len(__A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
| 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =list(poly_a or [0] )[:]
lowercase : Dict =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : int =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : List[str] =len(self.polyB )
# Add 0 to make lengths equal a power of 2
        lowercase : Tuple =int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
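        # Padding both inputs to a common power-of-two length lets the radix-2
        # butterflies below halve the problem at every stage; the product needs
        # at least len(A) + len(B) - 1 coefficients, hence the ceiling above.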
# A complex root used for the fourier transform
lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : str =self.__multiply()
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase ) <= 1:
return dft[0]
#
lowercase : List[Any] =self.c_max_length // 2
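        # Iterative decimation: each pass pairs row i with row i + next_ncol and
        # combines them with twiddle factors (powers of the root), halving
        # next_ncol until a single row of DFT values remains.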
while next_ncol > 0:
lowercase : str =[[] for i in range(UpperCAmelCase )]
lowercase : List[str] =self.root**next_ncol
# First half of next step
lowercase : Union[str, Any] =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : Any =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Tuple =new_dft
lowercase : List[Any] =next_ncol // 2
return dft[0]
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.__dft('''A''' )
lowercase : Union[str, Any] =self.__dft('''B''' )
lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
        if len(inverse_c[0] ) <= 1:
            return inverse_c[0]
# Inverse DFT
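        # The inverse pass mirrors the forward butterflies; dividing by 2 at
        # each of the log2(N) levels accumulates the overall 1/N scaling.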
lowercase : Tuple =2
while next_ncol <= self.c_max_length:
lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
lowercase : Tuple =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
        lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverse_c]
# Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c
def __str__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        lowercase : List[str] ='''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 1 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( __A : List[str] , __A : Tuple , __A : Optional[int] , __A : Tuple , __A : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[str] =TapasConfig.from_json_file(__A )
# set absolute/relative position embeddings parameter
lowercase : List[str] =reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
lowercase : Optional[int] =TapasForQuestionAnswering(config=__A )
elif task == "WTQ":
# run_task_main.py hparams
lowercase : List[Any] =4
lowercase : List[Any] =True
# hparam_utils.py hparams
lowercase : Dict =0.664694
lowercase : Any =0.207951
lowercase : List[str] =0.121194
lowercase : List[Any] =True
lowercase : Any =True
lowercase : Dict =False
lowercase : Union[str, Any] =0.0352513
lowercase : Any =TapasForQuestionAnswering(config=__A )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowercase : str =4
lowercase : List[Any] =False
# hparam_utils.py hparams
lowercase : Tuple =36.4519
lowercase : Optional[Any] =0.903421
lowercase : Tuple =222.088
lowercase : Union[str, Any] =True
lowercase : str =True
lowercase : List[str] =True
lowercase : str =0.763141
lowercase : Union[str, Any] =TapasForQuestionAnswering(config=__A )
elif task == "TABFACT":
lowercase : Any =TapasForSequenceClassification(config=__A )
elif task == "MLM":
lowercase : Optional[Any] =TapasForMaskedLM(config=__A )
elif task == "INTERMEDIATE_PRETRAINING":
lowercase : List[str] =TapasModel(config=__A )
else:
raise ValueError(F'Task {task} not supported.' )
print(F'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__A , __A , __A )
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__A )
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}' )
lowercase : int =TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + '''vocab.txt''' , model_max_length=5_1_2 )
tokenizer.save_pretrained(__A )
print('''Used relative position embeddings:''' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 8 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : List[Any] =str(bin(__A ) )
binary_number += "0" * shift_amount
return binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : Union[str, Any] =str(bin(__A ) )[2:]
if shift_amount >= len(__A ):
return "0b0"
lowercase : Any =binary_number[: len(__A ) - shift_amount]
return "0b" + shifted_binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
lowercase : str ='''0''' + str(bin(__A ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase : Dict =len(bin(__A )[3:] ) # Find 2's complement of number
lowercase : Optional[Any] =bin(abs(__A ) - (1 << binary_number_length) )[3:]
lowercase : int =(
'''1''' + '''0''' * (binary_number_length - len(__A )) + binary_number
)
if shift_amount >= len(__A ):
return "0b" + binary_number[0] * len(__A )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__A ) - shift_amount]
)
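# Minimal usage sketch, assuming the three shift helpers above keep their
# intended names (logical_left_shift, logical_right_shift,
# arithmetic_right_shift) once the placeholder `lowercase_` names are resolved:
#   logical_left_shift(0b1101, 2)       -> '0b110100'
#   logical_right_shift(0b1101, 2)      -> '0b11'
#   arithmetic_right_shift(-0b1101, 2)  -> '0b11100'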
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 1 |
'''simple docstring'''
def lowercase_ ( __A : list , __A : int , __A : int = 0 , __A : int = 0 ) -> int:
"""simple docstring"""
lowercase : List[Any] =right or len(__A ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__A , __A , left + 1 , right - 1 )
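# Two-ended recursive linear search: each call compares both boundaries and
# shrinks the window by one element per side, giving about n/2 recursion depth.
# Caveat: the `right or len(...) - 1` default treats an explicit right=0 as
# "unset" and silently widens the window to the whole list.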
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =pipeline(
'''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Optional[Any] =INVOICE_URL
lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
lowercase : Dict ='''What is the placebo?'''
lowercase : Optional[Any] =[
{
'''image''': load_image(UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
self.assertEqual(
UpperCAmelCase , [
[
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : Tuple ='''How many cats are there?'''
lowercase : Optional[int] =[
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Dict =[]
lowercase : str =[]
lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase : Dict =INVOICE_URL
lowercase : str ='''What is the invoice number?'''
lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase : Dict =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : str ) -> Dict:
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : Tuple =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
lowercase : Tuple =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : Dict =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : List[Any] =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase : str =INVOICE_URL
lowercase : int ='''What is the invoice number?'''
lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase : Any =INVOICE_URL
lowercase : Union[str, Any] ='''What is the invoice number?'''
lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
pass
| 8 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ = '''CLIPImageProcessor'''
UpperCamelCase_ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase , )
lowercase : Tuple =kwargs.pop('''feature_extractor''' )
lowercase : List[str] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : int=None , UpperCAmelCase : str=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase : int =self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if images is not None:
lowercase : Union[str, Any] =self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and images is not None:
lowercase : Optional[Any] =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def A__ ( self : Tuple , *UpperCAmelCase : Any , **UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =self.tokenizer.model_input_names
lowercase : List[str] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
return self.image_processor_class
@property
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
return self.image_processor
| 8 |
'''simple docstring'''
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A ) , __A )
return number - int(__A )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 1 |
'''simple docstring'''
def lowercase_ ( __A : list ) -> list:
"""simple docstring"""
if len(__A ) <= 1:
return [tuple(__A )]
lowercase : List[str] =[]
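    # Iterative Heap's algorithm: c[i] records how many times index i has been
    # the swap pivot; swapping with index 0 (even i) or c[i] (odd i) yields all
    # n! orderings, each differing from the previous by one transposition.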
def generate(__A : int , __A : list ):
lowercase : Any =[0] * n
res.append(tuple(__A ) )
lowercase : List[Any] =0
while i < n:
if c[i] < i:
if i % 2 == 0:
lowercase , lowercase : Tuple =arr[i], arr[0]
else:
lowercase , lowercase : Union[str, Any] =arr[i], arr[c[i]]
res.append(tuple(__A ) )
c[i] += 1
lowercase : Optional[Any] =0
else:
lowercase : List[str] =0
i += 1
generate(len(__A ) , __A )
return res
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 8 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : List[Any] , __A : int , __A : int ) -> Optional[int]:
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
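# LayoutLM-style models expect word boxes on a fixed 0-1000 grid, so the helper
# above rescales pixel coordinates by the image's width and height.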
def lowercase_ ( __A : np.ndarray , __A : Optional[str] , __A : Optional[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase : int =to_pil_image(__A )
lowercase , lowercase : Tuple =pil_image.size
lowercase : Optional[Any] =pytesseract.image_to_data(__A , lang=__A , output_type='''dict''' , config=__A )
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase : Dict =[idx for idx, word in enumerate(__A ) if not word.strip()]
lowercase : str =[word for idx, word in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : Optional[int] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : List[Any] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : str =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : int =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase : Tuple =[]
for x, y, w, h in zip(__A , __A , __A , __A ):
lowercase : str =[x, y, x + w, y + h]
actual_boxes.append(__A )
# finally, normalize the bounding boxes
lowercase : List[str] =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(__A , __A , __A ) )
assert len(__A ) == len(__A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> BatchFeature:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
| 8 | 1 |
'''simple docstring'''
def lowercase_ ( __A : int ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
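# Example: 28 is perfect, since its proper divisors 1 + 2 + 4 + 7 + 14 sum to 28.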
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
SCREAMING_SNAKE_CASE = int(input('Enter number: ').strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 8 | 1 |
'''simple docstring'''
def lowercase_ ( __A : Optional[Any] ) -> int:
"""simple docstring"""
lowercase : Optional[int] =[0] * len(__A )
lowercase : List[str] =[]
lowercase : List[str] =[1] * len(__A )
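    # Kahn's topological traversal: vertices enter the queue once their
    # indegree drops to zero, and long_dist[x] tracks the number of nodes on
    # the longest path ending at x (initialised to 1 for the vertex itself).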
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
lowercase : List[Any] =queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowercase : Dict =long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__A )
print(max(__A ) )
# Adjacency list of Graph
SCREAMING_SNAKE_CASE = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
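# Lazy-import pattern: each optional backend appends its symbols to
# `_import_structure` only when the dependency is installed, and `_LazyModule`
# defers the real imports until an attribute is first accessed.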
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class UpperCAmelCase_ ( logging.LoggerAdapter ):
"""simple docstring"""
@staticmethod
def A__ ( UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def A__ ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase : str =kwargs.pop('''main_process_only''' , UpperCAmelCase )
lowercase : int =kwargs.pop('''in_order''' , UpperCAmelCase )
if self.isEnabledFor(UpperCAmelCase ):
if self._should_log(UpperCAmelCase ):
lowercase , lowercase : Optional[int] =self.process(UpperCAmelCase , UpperCAmelCase )
self.logger.log(UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
elif in_order:
lowercase : Union[str, Any] =PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase , lowercase : Dict =self.process(UpperCAmelCase , UpperCAmelCase )
self.logger.log(UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
state.wait_for_everyone()
def lowercase_ ( __A : str , __A : str = None ) -> Optional[int]:
"""simple docstring"""
if log_level is None:
lowercase : Union[str, Any] =os.environ.get('''ACCELERATE_LOG_LEVEL''' , __A )
lowercase : Tuple =logging.getLogger(__A )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__A , {} )
| 8 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def lowercase_ ( __A : Union[str, Version] , __A : str , __A : str ) -> Union[str, Any]:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
lowercase : Any =STR_OPERATION_TO_FUNC[operation]
if isinstance(__A , __A ):
lowercase : List[Any] =parse(importlib.metadata.version(__A ) )
return operation(__A , parse(__A ) )
def lowercase_ ( __A : str , __A : str ) -> Tuple:
"""simple docstring"""
return compare_versions(__A , __A , __A )
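# Minimal usage sketch, assuming the two helpers above keep their intended
# names (compare_versions and is_torch_version) once the placeholder
# `lowercase_` names are resolved:
#   is_torch_version(">=", "1.12.0")  -> True on torch >= 1.12.0 installs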
| 8 | 1 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : str , __A : str ) -> List[str]:
"""simple docstring"""
lowercase : List[Any] =RobertaPreLayerNormConfig.from_pretrained(
__A , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
# convert state_dict
lowercase : List[str] =torch.load(hf_hub_download(repo_id=__A , filename='''pytorch_model.bin''' ) )
lowercase : int ={}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('''roberta.''' ):
lowercase : List[Any] ='''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
continue
lowercase : str =tensor_value
lowercase : List[Any] =RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__A , config=__A , state_dict=__A )
model.save_pretrained(__A )
# convert tokenizer
lowercase : List[str] =AutoTokenizer.from_pretrained(__A )
tokenizer.save_pretrained(__A )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 8 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( __A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
lowercase : List[Any] =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
lowercase : List[str] =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
lowercase : Union[str, Any] =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 1 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
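    # The single sigmoid unit outputs P(class == 1), which pairs with the
    # binary_crossentropy loss configured below.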
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('cnn.h5')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image) / 255.0  # match the training rescale
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] < 0.5:  # the sigmoid output is a probability, not a hard 0/1
        prediction = 'Normal'
    else:
        prediction = 'Abnormality detected'
| 8 |
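# Illustrative sketch: reloading the network saved above for inference, as
# hinted at by the commented-out load_model lines. Paths are placeholders.
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("cnn.h5")
img = tf.keras.preprocessing.image.load_img(
    "dataset/single_prediction/image.png", target_size=(64, 64)
)
x = tf.keras.preprocessing.image.img_to_array(img) / 255.0  # match training rescale
x = np.expand_dims(x, axis=0)
prob = model.predict(x)[0][0]  # sigmoid output in [0, 1]
print("Abnormality detected" if prob >= 0.5 else "Normal")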
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
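# Illustrative sketch of the lazy-import idea behind _LazyModule, expressed
# with PEP 562's module-level __getattr__. This is a simplified stand-in for
# the real transformers machinery, meant to live in a package's __init__.py.
import importlib

_LAZY_ATTRS = {"M2M100Model": ".modeling_m2m_100"}  # attribute -> submodule

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)  # the import happens on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")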
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
UpperCamelCase_ = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
UpperCamelCase_ = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
UpperCamelCase_ = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
UpperCamelCase_ = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
UpperCamelCase_ = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
UpperCamelCase_ = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
UpperCamelCase_ = field(
default=10000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    UpperCamelCase_ = field(default=2e-4 , metadata={'''help''': '''Learning rate for training.'''} )
    UpperCamelCase_ = field(default='''cosine''' , metadata={'''help''': '''Learning rate scheduler type.'''} )
UpperCamelCase_ = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
UpperCamelCase_ = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
UpperCamelCase_ = field(default=50000 , metadata={'''help''': '''Maximum number of training steps.'''} )
UpperCamelCase_ = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
UpperCamelCase_ = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
UpperCamelCase_ = field(default=1 , metadata={'''help''': '''Training seed.'''} )
UpperCamelCase_ = field(
default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
UpperCamelCase_ = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
UpperCamelCase_ = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
UpperCamelCase_ = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
UpperCamelCase_ = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
UpperCamelCase_ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
UpperCamelCase_ = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
UpperCamelCase_ = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
UpperCamelCase_ = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
UpperCamelCase_ = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
UpperCamelCase_ = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
UpperCamelCase_ = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
UpperCamelCase_ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
UpperCamelCase_ = field(
        default='''eval_results.json''' , metadata={'''help''': '''Name of the file to save the evaluation results to.'''} )
UpperCamelCase_ = field(
        default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on the machine.'''} )
UpperCamelCase_ = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default=__A , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
UpperCamelCase_ = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
UpperCamelCase_ = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
UpperCamelCase_ = field(
        default=100000 , metadata={'''help''': '''Number of samples to save per JSON output file.'''} )
UpperCamelCase_ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
UpperCamelCase_ = field(
default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
UpperCamelCase_ = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
UpperCamelCase_ = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
UpperCamelCase_ = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
UpperCamelCase_ = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
UpperCamelCase_ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
UpperCamelCase_ = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
UpperCamelCase_ = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
UpperCamelCase_ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
UpperCamelCase_ = field(default=200000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
UpperCamelCase_ = field(
        default=32768 , metadata={'''help''': '''Target vocabulary size for the new tokenizer.'''} )
UpperCamelCase_ = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
UpperCamelCase_ = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
UpperCamelCase_ = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
    UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Number of workers used for pretokenization.'''} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
UpperCamelCase_ = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
UpperCamelCase_ = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
| 8 |
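# Illustrative sketch: dataclasses like the ones above are typically consumed
# through HfArgumentParser. The toy dataclass below is a stand-in, since the
# attribute names in this dump are elided.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyTrainingArguments:
    model_ckpt: str = field(default="codeparrot/codeparrot")
    train_batch_size: int = field(default=2)
    learning_rate: float = field(default=2e-4)

toy_parser = HfArgumentParser(ToyTrainingArguments)
(toy_args,) = toy_parser.parse_args_into_dataclasses()
print(toy_args.model_ckpt, toy_args.train_batch_size, toy_args.learning_rate)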
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n  title={The mahalanobis distance},\n  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n  journal={Chemometrics and intelligent laboratory systems},\n  volume={50},\n  number={1},\n  pages={1--18},\n  year={2000},\n  publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    X: List of datapoints to be compared with the `reference_distribution`.\n    reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n    mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n    >>> print(results)\n    {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
    def _compute( self , X , reference_distribution ):
        '''simple docstring'''
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 8 |
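# Illustrative check of the formula used by the metric above, in plain NumPy.
# The axis-free np.mean mirrors the metric's own computation; values are toy.
import numpy as np

reference = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5], [0.2, 0.8]])
points = np.array([[0.0, 1.0]])

delta = points - np.mean(reference)  # same axis-free mean as in _compute
covariance = np.cov(reference.T)
inv_covariance = np.linalg.pinv(covariance)  # pinv handles singular covariance
mahal = np.dot(np.dot(delta, inv_covariance), delta.T).diagonal()
print(mahal)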
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 8 | 1 |
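# Illustrative sketch of the sequence layouts the helpers above produce for
# HerBERT (<s> = CLS, </s> = SEP). Token ids below are placeholders.
cls_id, sep_id = 0, 2  # placeholder ids
ids_a = [10, 11, 12]
ids_b = [20, 21]

single = [cls_id] + ids_a + [sep_id]                   # <s> A </s>
pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]  # <s> A </s> B </s>
type_ids = [0] * len(single) + [1] * (len(ids_b) + 1)  # token type ids for the pair
print(single, pair, type_ids)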
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope='''session''' )
def lowercase_ ( __A : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase : Union[str, Any] =tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
lowercase : Dict =bytes(__A , '''utf-8''' )
with zstd.open(__A , '''wb''' ) as f:
f.write(__A )
return path
@pytest.fixture
def lowercase_ ( __A : Any ) -> int:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , __A ) , '''w''' ) as f:
f.write(__A )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def lowercase_ ( __A : int , __A : int , __A : Any , __A : List[Any] , __A : Dict , __A : Tuple ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] ={'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
lowercase : List[Any] =input_paths[compression_format]
lowercase : Optional[Any] =tmp_path / '''cache'''
lowercase : List[Any] =DownloadConfig(cache_dir=__A , extract_compressed_file=__A )
lowercase : List[str] =cached_path(__A , download_config=__A )
with open(__A ) as f:
lowercase : Any =f.read()
with open(__A ) as f:
lowercase : int =f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def lowercase_ ( __A : Union[str, Any] , __A : List[Any] , __A : List[Any] , __A : Optional[Any] , __A : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Dict ='''custom_cache'''
lowercase : int ='''custom_extracted_dir'''
lowercase : Optional[Any] =tmp_path / '''custom_extracted_path'''
if default_extracted:
lowercase : Tuple =('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , __A )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__A ) )
lowercase : List[Any] =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowercase : str =xz_file
lowercase : str =(
DownloadConfig(extract_compressed_file=__A )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__A )
)
lowercase : Tuple =cached_path(__A , download_config=__A )
assert Path(__A ).parent.parts[-2:] == expected
def lowercase_ ( __A : int ) -> Any:
"""simple docstring"""
lowercase : Tuple =str(Path(__A ).resolve() )
assert cached_path(__A ) == text_file
# relative path
lowercase : Optional[Any] =str(Path(__A ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__A ) == text_file
def lowercase_ ( __A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] =str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(__A ):
cached_path(__A )
# relative path
lowercase : List[str] ='''./__missing_file__.txt'''
with pytest.raises(__A ):
cached_path(__A )
def lowercase_ ( __A : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase : Union[str, Any] =get_from_cache(F'tmp://{tmpfs_file}' )
with open(__A ) as f:
lowercase : Optional[int] =f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __A )
def lowercase_ ( ) -> Dict:
"""simple docstring"""
with pytest.raises(__A ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __A )
def lowercase_ ( __A : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__A ):
http_get('''https://huggingface.co''' , temp_file=__A )
with pytest.raises(__A ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __A )
def lowercase_ ( __A : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase : str =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__A ):
ftp_get('''ftp://huggingface.co''' , temp_file=__A )
with pytest.raises(__A ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , __A )
def lowercase_ ( __A : List[str] ) -> Any:
"""simple docstring"""
lowercase : Tuple =tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__A ):
fsspec_get('''s3://huggingface.co''' , temp_file=__A )
with pytest.raises(__A ):
fsspec_head('''s3://huggingface.co''' )
| 8 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : Tuple =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Optional[Any] ='''1'''
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : str =self.get_env()
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Any ='''1'''
lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] ='''
from transformers import pipeline
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase : Tuple =self.get_env()
lowercase : Optional[int] ='''1'''
lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] ='''
from transformers import AutoModel
'''
lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : List[str] ='''1'''
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 8 | 1 |
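# Illustrative sketch of the network-kill trick the tests above inject into
# their subprocesses: monkeypatching socket.socket makes any later network
# call raise. This is destructive for the running process; demo only.
import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

socket.socket = offline_socket  # from here on, connections fail fast

try:
    import urllib.request
    urllib.request.urlopen("https://huggingface.co")
except RuntimeError as err:
    print(f"blocked as expected: {err}")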
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger()
def lowercase_ ( __A : int , __A : str , __A : LevitConfig , __A : Path , __A : bool = True ) -> int:
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_2_8:
if name[-1] == "S":
lowercase : Tuple =timm.create_model('''levit_128s''' , pretrained=__A )
else:
lowercase : int =timm.create_model('''levit_128''' , pretrained=__A )
if hidden_sizes == 1_9_2:
lowercase : List[Any] =timm.create_model('''levit_192''' , pretrained=__A )
if hidden_sizes == 2_5_6:
lowercase : str =timm.create_model('''levit_256''' , pretrained=__A )
if hidden_sizes == 3_8_4:
lowercase : Optional[int] =timm.create_model('''levit_384''' , pretrained=__A )
from_model.eval()
lowercase : List[str] =LevitForImageClassificationWithTeacher(__A ).eval()
lowercase : Optional[Any] =OrderedDict()
lowercase : Any =from_model.state_dict()
lowercase : Union[str, Any] =list(from_model.state_dict().keys() )
lowercase : List[str] =list(our_model.state_dict().keys() )
print(len(__A ) , len(__A ) )
for i in range(len(__A ) ):
lowercase : int =weights[og_keys[i]]
our_model.load_state_dict(__A )
lowercase : Dict =torch.randn((2, 3, 2_2_4, 2_2_4) )
lowercase : Optional[int] =from_model(__A )
lowercase : Union[str, Any] =our_model(__A ).logits
assert torch.allclose(__A , __A ), "The model logits don't match the original one."
lowercase : List[Any] =name
print(__A )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase : Tuple =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def lowercase_ ( __A : Path , __A : str = None , __A : bool = True ) -> str:
"""simple docstring"""
lowercase : Tuple ='''imagenet-1k-id2label.json'''
lowercase : List[Any] =1_0_0_0
lowercase : List[str] =(1, num_labels)
lowercase : int ='''huggingface/label-files'''
lowercase : Any =num_labels
lowercase : Optional[int] =json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
lowercase : Union[str, Any] ={int(__A ): v for k, v in idalabel.items()}
lowercase : int =idalabel
lowercase : Union[str, Any] ={v: k for k, v in idalabel.items()}
lowercase : Optional[Any] =partial(__A , num_labels=__A , idalabel=__A , labelaid=__A )
lowercase : Dict ={
'''levit-128S''': 1_2_8,
'''levit-128''': 1_2_8,
'''levit-192''': 1_9_2,
'''levit-256''': 2_5_6,
'''levit-384''': 3_8_4,
}
lowercase : Any ={
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_2_8, 2_5_6, 3_8_4] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[1_6, 1_6, 1_6] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_2_8, 2_5_6, 3_8_4] , num_attention_heads=[4, 8, 1_2] , depths=[4, 4, 4] , key_dim=[1_6, 1_6, 1_6] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_9_2, 2_8_8, 3_8_4] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_5_6, 3_8_4, 5_1_2] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_8_4, 5_1_2, 7_6_8] , num_attention_heads=[6, 9, 1_2] , depths=[4, 4, 4] , key_dim=[3_2, 3_2, 3_2] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __A , names_to_config[model_name] , __A , __A )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __A , __A , __A , __A )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
        help='The name of the model you wish to convert; it must be one of the supported LeViT* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 8 |
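# Illustrative sketch of the positional state-dict transfer used above: copy
# weights by zipping source and destination key order. This only works when
# both models enumerate their parameters in exactly the same order, which the
# conversion script relies on.
from collections import OrderedDict

def transfer_by_position(src_state_dict, dst_model):
    src_keys = list(src_state_dict.keys())
    dst_keys = list(dst_model.state_dict().keys())
    assert len(src_keys) == len(dst_keys), "parameter count mismatch"
    new_state = OrderedDict(
        (dst, src_state_dict[src]) for src, dst in zip(src_keys, dst_keys)
    )
    dst_model.load_state_dict(new_state)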
'''simple docstring'''
__author__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation : str ) -> int:
    """simple docstring"""
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack : Stack[int] = Stack()
    operator_stack : Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push (single-digit) operands onto the operand stack
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i )
        # RULE 3: left parentheses are ignored
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num_one = operand_stack.peek()
            operand_stack.pop()
            num_two = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_two , num_one )
            operand_stack.push(total )
    # RULE 5: the single remaining operand is the result
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 1 |
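# Illustrative trace of the two-stack evaluation of (5 + ((4 * 2) * (2 + 3))):
#   push 5; push +; push 4; push *; push 2
#   ')' -> pop *, 2, 4 -> push 4 * 2 = 8     operands: [5, 8]
#   push *; push 2; push +; push 3
#   ')' -> pop +, 3, 2 -> push 2 + 3 = 5     operands: [5, 8, 5]
#   ')' -> pop *, 5, 8 -> push 8 * 5 = 40    operands: [5, 40]
#   ')' -> pop +, 40, 5 -> push 5 + 40 = 45  operands: [45]
assert dijkstras_two_stack_algorithm("(5 + ((4 * 2) * (2 + 3)))") == 45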
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def lowercase_ ( __A : int , __A : Union[str, Any] , __A : Optional[int] , __A : List[str] , __A : Any ) -> str:
"""simple docstring"""
for attribute in key.split('''.''' ):
lowercase : List[Any] =getattr(__A , __A )
if weight_type is not None:
lowercase : List[str] =getattr(__A , __A ).shape
else:
lowercase : Optional[int] =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
lowercase : List[Any] =value
elif weight_type == "weight_g":
lowercase : Optional[int] =value
elif weight_type == "weight_v":
lowercase : Any =value
elif weight_type == "bias":
lowercase : Optional[Any] =value
elif weight_type == "running_mean":
lowercase : List[str] =value
elif weight_type == "running_var":
lowercase : Optional[int] =value
elif weight_type == "num_batches_tracked":
lowercase : List[Any] =value
elif weight_type == "weight_ih_l0":
lowercase : Any =value
elif weight_type == "weight_hh_l0":
lowercase : str =value
elif weight_type == "bias_ih_l0":
lowercase : Union[str, Any] =value
elif weight_type == "bias_hh_l0":
lowercase : Tuple =value
elif weight_type == "weight_ih_l1":
lowercase : Optional[Any] =value
elif weight_type == "weight_hh_l1":
lowercase : Union[str, Any] =value
elif weight_type == "bias_ih_l1":
lowercase : str =value
elif weight_type == "bias_hh_l1":
lowercase : Dict =value
else:
lowercase : Tuple =value
logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def lowercase_ ( __A : Optional[int] , __A : int ) -> Any:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowercase , lowercase : List[Any] =key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowercase_ ( __A : Optional[int] , __A : Union[str, Any] , __A : Any ) -> List[Any]:
"""simple docstring"""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
else:
raise ValueError(F'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(__A , __A ):
logger.info(F'{name} was ignored' )
continue
lowercase : Optional[int] =False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowercase , lowercase : Union[str, Any] =key.split('''.*.''' )
if prefix in name and suffix in name:
lowercase : Tuple =suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
lowercase : int =True
if "*" in mapped_key:
lowercase : Optional[Any] =name.split(__A )[0].split('''.''' )[-2]
lowercase : Optional[Any] =mapped_key.replace('''*''' , __A )
if "weight_g" in name:
lowercase : List[Any] ='''weight_g'''
elif "weight_v" in name:
lowercase : Any ='''weight_v'''
elif "weight_ih_l0" in name:
lowercase : List[Any] ='''weight_ih_l0'''
elif "weight_hh_l0" in name:
lowercase : List[Any] ='''weight_hh_l0'''
elif "bias_ih_l0" in name:
lowercase : Any ='''bias_ih_l0'''
elif "bias_hh_l0" in name:
lowercase : Optional[int] ='''bias_hh_l0'''
elif "weight_ih_l1" in name:
lowercase : Optional[Any] ='''weight_ih_l1'''
elif "weight_hh_l1" in name:
lowercase : List[str] ='''weight_hh_l1'''
elif "bias_ih_l1" in name:
lowercase : Optional[Any] ='''bias_ih_l1'''
elif "bias_hh_l1" in name:
lowercase : Optional[Any] ='''bias_hh_l1'''
elif "bias" in name:
lowercase : Dict ='''bias'''
elif "weight" in name:
lowercase : Optional[Any] ='''weight'''
elif "running_mean" in name:
lowercase : List[str] ='''running_mean'''
elif "running_var" in name:
lowercase : Dict ='''running_var'''
elif "num_batches_tracked" in name:
lowercase : List[Any] ='''num_batches_tracked'''
else:
lowercase : Tuple =None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def lowercase_ ( __A : int , __A : Any , __A : Optional[Any] , __A : Optional[int]=None , __A : List[Any]=None , ) -> Dict:
"""simple docstring"""
if config_path is not None:
lowercase : Dict =EncodecConfig.from_pretrained(__A )
else:
lowercase : Dict =EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowercase : Dict =[8, 5, 4, 4]
lowercase : Any =[2.2]
lowercase : int =6_4
lowercase : int =3_2_0_0_0
lowercase : Optional[Any] =2_0_4_8
lowercase : int =False
lowercase : Tuple =False
lowercase : Any =False
elif model_name == "encodec_48khz":
lowercase : Union[str, Any] =[8, 5, 4, 2]
lowercase : Optional[Any] =[3.0, 6.0, 12.0, 24.0]
lowercase : Any =4_8_0_0_0
lowercase : Optional[Any] =2
lowercase : List[str] =False
lowercase : Tuple ='''time_group_norm'''
lowercase : Optional[int] =True
lowercase : Tuple =1.0
lowercase : Union[str, Any] =0.01
else:
raise ValueError(F'Unknown model name: {model_name}' )
lowercase : str =EncodecModel(__A )
lowercase : List[str] =EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__A )
lowercase : List[Any] =torch.load(__A )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowercase : int =original_checkpoint['''best_state''']
recursively_load_weights(__A , __A , __A )
model.save_pretrained(__A )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__A )
model.push_to_hub(__A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 8 |
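# Illustrative sketch of the dotted-path assignment pattern behind
# set_recursively above: walk "a.b.c" with getattr, then copy the tensor into
# the final attribute. The shape check mirrors the script's guard.
import torch

def set_by_path(model, dotted_key, value):
    obj = model
    parts = dotted_key.split(".")
    for attr in parts[:-1]:
        obj = getattr(obj, attr)
    target = getattr(obj, parts[-1])
    assert target.shape == value.shape, f"shape mismatch for {dotted_key}"
    target.data.copy_(value)  # in-place copy keeps the Parameter object intact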
'''simple docstring'''
import re
def indian_phone_validator( phone : str ) -> bool:
    """simple docstring"""
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 8 | 1 |
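# Quick illustrative checks for the validator above (numbers are made up):
assert indian_phone_validator("+918827897895")
assert indian_phone_validator("9876543210")
assert not indian_phone_validator("12345")
assert not indian_phone_validator("+9188278978950")  # one digit too many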
'''simple docstring'''
class TrieNode:
    """simple docstring"""
    def __init__( self ) -> None:
        '''simple docstring'''
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self , words: list[str] ) -> None:
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self , word: str ) -> None:
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self , word: str ) -> bool:
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self , word: str ) -> None:
        '''simple docstring'''
        def _delete(curr: TrieNode , word: str , index: int ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
def print_words( node : TrieNode , word : str ) -> None:
    """simple docstring"""
    if node.is_leaf:
        print(word , end=''' ''' )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie( ) -> bool:
    """simple docstring"""
    words = '''banana bananas bandana band apple all beast'''.split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find('''banana''' )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    assert root.find('''apple''' )
    assert root.find('''all''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def print_results( msg : str , passes : bool ) -> None:
    """simple docstring"""
    print(str(msg ) , '''works!''' if passes else '''doesn\'t work :(''' )
def pytests( ) -> None:
    """simple docstring"""
    assert test_trie()
def main( ) -> None:
    """simple docstring"""
    print_results('''Testing trie functionality''' , test_trie() )
if __name__ == "__main__":
main()
| 8 |
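# Illustrative sketch: prefix autocomplete built on the same TrieNode
# structure, reusing the word-collecting idea from print_words above.
def complete(root: TrieNode, prefix: str) -> list[str]:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []  # no stored word starts with this prefix
        curr = curr.nodes[char]
    results: list[str] = []

    def collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            results.append(word)
        for char, child in node.nodes.items():
            collect(child, word + char)

    collect(curr, prefix)
    return results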
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
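        # tile each input along a new choice axis: (batch, seq_len) -> (batch, num_choices, seq_len)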
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
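        # deterministic query/key tensors of shape (batch, heads, seq_len, head_dim) so the rotated values can be checked exactly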
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
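        # (in the upstream test this flag marks the config as a decoder before building the causal LM head)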
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
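        # sinusoidal position embeddings are deterministic, so the first positions can be compared to fixed values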
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
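        # add singleton batch and head axes so the embeddings broadcast over the (2, 12, 16, 64) tensors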
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
| 8 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , **UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase , )
lowercase : Any =kwargs.pop('''feature_extractor''' )
lowercase : Dict =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ) -> BatchEncoding:
'''simple docstring'''
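        # OCR-extracted words/boxes and user-supplied boxes or word labels are mutually exclusive, so validate first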
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowercase : Tuple =self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Optional[Any] =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase : List[str] =features['''words''']
lowercase : Optional[Any] =self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
lowercase : List[str] =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase : Dict =images
return encoded_inputs
def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
'''simple docstring'''
lowercase : str =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
return images_with_overflow
def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
return self.image_processor_class
@property
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
return self.image_processor
| 8 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : str , __A : Any=False ) -> Dict:
"""simple docstring"""
lowercase : Tuple =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase : Dict =[(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def lowercase_ ( __A : Tuple , __A : str , __A : Tuple=False ) -> int:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowercase : List[str] =''''''
else:
lowercase : str ='''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Dict =state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowercase : int =state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
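        # timm stacks q, k and v into a single (3 * hidden_size, hidden_size) matrix, so slice it into thirds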
lowercase : int =in_proj_weight[
: config.hidden_size, :
]
lowercase : Any =in_proj_bias[: config.hidden_size]
lowercase : List[str] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Optional[int] =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Tuple =in_proj_weight[
-config.hidden_size :, :
]
lowercase : int =in_proj_bias[-config.hidden_size :]
def lowercase_ ( __A : str ) -> Dict:
"""simple docstring"""
lowercase : int =['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__A , __A )
def lowercase_ ( __A : Optional[Any] , __A : str , __A : Any ) -> Any:
"""simple docstring"""
lowercase : Tuple =dct.pop(__A )
lowercase : Any =val
def lowercase_ ( ) -> List[str]:
"""simple docstring"""
lowercase : Union[str, Any] ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : List[Any] =Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase_ ( __A : Tuple , __A : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase : str =ViTConfig()
lowercase : Optional[int] =False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowercase : Dict =True
lowercase : Union[str, Any] =int(vit_name[-1_2:-1_0] )
lowercase : Tuple =int(vit_name[-9:-6] )
else:
lowercase : Dict =1_0_0_0
lowercase : int ='''huggingface/label-files'''
lowercase : Optional[Any] ='''imagenet-1k-id2label.json'''
lowercase : Any =json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
lowercase : List[str] ={int(__A ): v for k, v in idalabel.items()}
lowercase : List[Any] =idalabel
lowercase : Tuple ={v: k for k, v in idalabel.items()}
lowercase : Optional[int] =int(vit_name[-6:-4] )
lowercase : int =int(vit_name[-3:] )
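    # e.g. "vit_base_patch16_224" yields patch_size=16 and image_size=224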
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
lowercase : str =1_9_2
lowercase : List[str] =7_6_8
lowercase : int =1_2
lowercase : List[str] =3
elif vit_name[9:].startswith('''small''' ):
lowercase : Tuple =3_8_4
lowercase : Tuple =1_5_3_6
lowercase : Dict =1_2
lowercase : int =6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
lowercase : Union[str, Any] =7_6_8
lowercase : Optional[int] =2_3_0_4
lowercase : List[Any] =8
lowercase : Union[str, Any] =8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
lowercase : Optional[Any] =1_0_2_4
lowercase : List[Any] =4_0_9_6
lowercase : Union[str, Any] =2_4
lowercase : int =1_6
elif vit_name[4:].startswith('''huge''' ):
lowercase : Dict =1_2_8_0
lowercase : int =5_1_2_0
lowercase : Optional[int] =3_2
lowercase : int =1_6
# load original model from timm
lowercase : int =timm.create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase : Optional[int] =timm_model.state_dict()
if base_model:
remove_classification_head_(__A )
lowercase : Union[str, Any] =create_rename_keys(__A , __A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , __A )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowercase : Tuple =ViTModel(__A ).eval()
else:
lowercase : Union[str, Any] =ViTForImageClassification(__A ).eval()
model.load_state_dict(__A )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowercase : Optional[int] =DeiTImageProcessor(size=config.image_size )
else:
lowercase : int =ViTImageProcessor(size=config.image_size )
lowercase : str =image_processor(images=prepare_img() , return_tensors='''pt''' )
lowercase : Dict =encoding['''pixel_values''']
lowercase : Optional[Any] =model(__A )
if base_model:
lowercase : str =timm_model.forward_features(__A )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__A , outputs.pooler_output , atol=1E-3 )
else:
lowercase : str =timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
Path(__A ).mkdir(exist_ok=__A )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__A )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__A )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 8 |
'''simple docstring'''
def lowercase_ ( __A : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
"""simple docstring"""
try:
lowercase : Any =int(__A )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =1
lowercase : Dict =2
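    # trial division: strip every factor, smallest first; the last factor removed is the largest prime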
while i * i <= n:
while n % i == 0:
lowercase : Optional[int] =i
n //= i
i += 1
if n > 1:
lowercase : Dict =n
return int(__A )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 | 1 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : UNetaDModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : DDPMScheduler , UpperCAmelCase : Optional[int] , ) -> str:
'''simple docstring'''
super().__init__()
lowercase : str =value_function
lowercase : Union[str, Any] =unet
lowercase : List[str] =scheduler
lowercase : Optional[Any] =env
lowercase : Any =env.get_dataset()
lowercase : Tuple ={}
for key in self.data.keys():
try:
lowercase : Any =self.data[key].mean()
except: # noqa: E722
pass
lowercase : Tuple ={}
for key in self.data.keys():
try:
lowercase : Tuple =self.data[key].std()
except: # noqa: E722
pass
lowercase : Optional[int] =env.observation_space.shape[0]
lowercase : Dict =env.action_space.shape[0]
def A__ ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def A__ ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def A__ ( self : str , UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
if type(UpperCAmelCase ) is dict:
return {k: self.to_torch(UpperCAmelCase ) for k, v in x_in.items()}
elif torch.is_tensor(UpperCAmelCase ):
return x_in.to(self.unet.device )
return torch.tensor(UpperCAmelCase , device=self.unet.device )
def A__ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
for key, val in cond.items():
lowercase : Dict =val.clone()
return x_in
def A__ ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
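        # reverse diffusion with value guidance: at each timestep, nudge x up the value-function gradient before denoising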
lowercase : Optional[int] =x.shape[0]
lowercase : str =None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase : List[Any] =torch.full((batch_size,) , UpperCAmelCase , device=self.unet.device , dtype=torch.long )
for _ in range(UpperCAmelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase : Dict =self.value_function(x.permute(0 , 2 , 1 ) , UpperCAmelCase ).sample
lowercase : List[Any] =torch.autograd.grad([y.sum()] , [x] )[0]
lowercase : Union[str, Any] =self.scheduler._get_variance(UpperCAmelCase )
lowercase : Any =torch.exp(0.5 * posterior_variance )
lowercase : str =model_std * grad
lowercase : Any =0
lowercase : List[Any] =x.detach()
lowercase : Optional[Any] =x + scale * grad
lowercase : Any =self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
lowercase : Dict =self.unet(x.permute(0 , 2 , 1 ) , UpperCAmelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase : Optional[int] =self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , predict_epsilon=UpperCAmelCase )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
lowercase : Tuple =self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
lowercase : Any =self.to_torch(UpperCAmelCase )
return x, y
def __call__( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str]=64 , UpperCAmelCase : Dict=32 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : List[str]=0.1 ) -> Any:
'''simple docstring'''
lowercase : Tuple =self.normalize(UpperCAmelCase , '''observations''' )
lowercase : Dict =obs[None].repeat(UpperCAmelCase , axis=0 )
lowercase : str ={0: self.to_torch(UpperCAmelCase )}
lowercase : Any =(batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase : Tuple =randn_tensor(UpperCAmelCase , device=self.unet.device )
lowercase : Optional[Any] =self.reset_xa(UpperCAmelCase , UpperCAmelCase , self.action_dim )
lowercase : Dict =self.to_torch(UpperCAmelCase )
# run the diffusion process
lowercase , lowercase : Tuple =self.run_diffusion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# sort output trajectories by value
lowercase : int =y.argsort(0 , descending=UpperCAmelCase ).squeeze()
lowercase : Union[str, Any] =x[sorted_idx]
lowercase : Optional[int] =sorted_values[:, :, : self.action_dim]
lowercase : str =actions.detach().cpu().numpy()
lowercase : List[Any] =self.de_normalize(UpperCAmelCase , key='''actions''' )
# select the action with the highest value
if y is not None:
lowercase : List[str] =0
else:
# if we didn't run value guiding, select a random action
lowercase : List[str] =np.random.randint(0 , UpperCAmelCase )
lowercase : List[Any] =denorm_actions[selected_index, 0]
return denorm_actions
| 8 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
lowercase : str =u
for i in range(1 , __A ):
lowercase : Any =temp * (u - i)
return temp
def lowercase_ ( ) -> None:
"""simple docstring"""
lowercase : List[str] =int(input('''enter the numbers of values: ''' ) )
lowercase : list[list[float]] =[]
for _ in range(__A ):
y.append([] )
for i in range(__A ):
for j in range(__A ):
y[i].append(__A )
lowercase : List[Any] =0
print('''enter the values of parameters in a list: ''' )
lowercase : Optional[int] =list(map(__A , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(__A ):
lowercase : str =float(input() )
lowercase : int =int(input('''enter the value to interpolate: ''' ) )
lowercase : Union[str, Any] =(value - x[0]) / (x[1] - x[0])
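    # u = (x - x0) / h, assuming equally spaced sample points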
# for calculating forward difference table
for i in range(1 , __A ):
for j in range(n - i ):
lowercase : str =y[j + 1][i - 1] - y[j][i - 1]
lowercase : Any =y[0][0]
for i in range(1 , __A ):
summ += (ucal(__A , __A ) * y[0][i]) / math.factorial(__A )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 8 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE = object()
def lowercase_ ( __A : str , __A : int ) -> Optional[Any]:
"""simple docstring"""
lowercase : str =tuple((re.compile(x + '''$''' ) for x in qs) )
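    # slide the compiled patterns over every window of ks; a window where all patterns match means the rule applies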
for i in range(len(__A ) - len(__A ) + 1 ):
lowercase : Optional[int] =[x.match(__A ) for x, y in zip(__A , ks[i:] )]
if matches and all(__A ):
return True
return False
def lowercase_ ( __A : Optional[int] ) -> Dict:
"""simple docstring"""
def replace(__A : str , __A : Tuple ):
for rule, replacement in rules:
if _match(__A , __A ):
return replacement
return val
return replace
def lowercase_ ( ) -> Dict:
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , __A )),
(("transformer", "wte", "embedding"), P('''mp''' , __A )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__A , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , __A )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__A , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , __A )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowercase_ ( __A : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : int =_get_partition_rules()
lowercase : Optional[int] =_replacement_rules(__A )
lowercase : Dict ={k: _unmatched for k in flatten_dict(__A )}
lowercase : int ={k: replace(__A , __A ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__A ) )
| 8 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
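        # a preprocessor_config.json next to a config.json is enough to resolve the processor class from a local folder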
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
            # Create a dummy config file with image_processor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
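# A minimal usage sketch of the tri-state `trust_remote_code` behaviour exercised
# above (assumption: network access and the same Hub checkpoint; left as comments
# so it is not executed as part of the test suite):
#
#   from transformers import AutoImageProcessor
#   # trust_remote_code=True pulls the custom NewImageProcessor code from the Hub
#   processor = AutoImageProcessor.from_pretrained(
#       "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
#   )
#   assert processor.__class__.__name__ == "NewImageProcessor"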
| 8 | 1 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Any=None , **UpperCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , UpperCAmelCase , )
super().__init__(args=UpperCAmelCase , **UpperCAmelCase )
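# Migration sketch for the deprecated shim above (assumption: standard
# `transformers.Trainer` keyword arguments; values are illustrative):
#
#   from transformers import Trainer, TrainingArguments
#   args = TrainingArguments(output_dir="out")
#   trainer = Trainer(model=model, args=args)  # drop-in replacement, no warning
#
# Instantiating the SageMaker class above only emits a deprecation warning and
# forwards every argument to `Trainer.__init__`.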
| 8 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9 # units = N * m^2 * C^-2
def coulombs_law ( force : float , charge_a : float , charge_b : float , distance : float ) -> dict[str, float]:
"""simple docstring"""
charge_product = abs(charge_a * charge_b )
if (force, charge_a, charge_b, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
force = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif charge_a == 0:
charge_a = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge_b)
return {"charge1": charge_a}
elif charge_b == 0:
charge_b = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge_a)
return {"charge2": charge_b}
elif distance == 0:
distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
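# Worked example for the solver above (illustrative values): with force set to 0,
# charges of 3 C and 5 C at a distance of 2000 m give
#
#   >>> coulombs_law(0, 3, 5, 2000)  # doctest: +SKIP
#   {'force': 33705.0}
#
# i.e. F = k * |q1 * q2| / d^2 = 8.988e9 * 15 / 4e6 N.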
| 8 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key ( k : str ) -> str:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
k = k.replace(pegasus_name , hf_name )
return k
def convert_pegasus ( tf_weights : dict , cfg_updates : dict ) -> PegasusForConditionalGeneration:
"""simple docstring"""
cfg_kwargs = DEFAULTS.copy()
cfg_kwargs.update(cfg_updates )
cfg = PegasusConfig(**cfg_kwargs )
torch_model = PegasusForConditionalGeneration(cfg )
sd = torch_model.model.state_dict()
mapping = {}
for k, v in tf_weights.items():
new_k = rename_state_dict_key(k )
if new_k not in sd:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
v = v.T
mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
mapping['''shared.weight'''][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
mapping['''encoder.embed_tokens.weight'''] = mapping['''shared.weight''']
mapping['''decoder.embed_tokens.weight'''] = mapping['''shared.weight''']
empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**empty_biases )
missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
unexpected_missing = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def get_tf_weights_as_numpy ( path : str = "./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
"""simple docstring"""
init_vars = tf.train.list_variables(path )
tf_weights = {}
ignore_name = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(init_vars , desc='''converting tf checkpoint to dict''' ):
skip_key = any(pat in name for pat in ignore_name )
if skip_key:
continue
array = tf.train.load_variable(path , name )
tf_weights[name] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch ( ckpt_path : str , save_dir : str ) -> None:
"""simple docstring"""
dataset = Path(ckpt_path ).parent.name
desired_max_model_length = task_specific_params[F'summarization_{dataset}']['''max_position_embeddings''']
tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=desired_max_model_length )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(save_dir )
# convert model
tf_weights = get_tf_weights_as_numpy(ckpt_path )
cfg_updates = task_specific_params[F'summarization_{dataset}']
if dataset == "large":
cfg_updates['''task_specific_params'''] = task_specific_params
torch_model = convert_pegasus(tf_weights , cfg_updates )
torch_model.save_pretrained(save_dir )
sd = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(sd , Path(save_dir ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE = parser.parse_args()
if args.save_dir is None:
SCREAMING_SNAKE_CASE = Path(args.tf_ckpt_path).parent.name
SCREAMING_SNAKE_CASE = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
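# Typical invocation of this conversion script (assumption: a local TF checkpoint;
# paths are illustrative):
#
#   python <this_script>.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
#
# When `save_dir` is omitted it defaults to pegasus/<dataset>, where <dataset> is
# derived from the checkpoint's parent directory name.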
| 8 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode ( ) -> Dict[int, str]:
"""simple docstring"""
bs = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
cs = bs[:]
n = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
cs = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
def get_pairs ( word ):
"""simple docstring"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
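# Quick sanity checks for the two helpers above (values follow from the
# definitions; the set's display order may vary):
#
#   >>> bytes_to_unicode()[0]        # doctest: +SKIP   control byte 0 is remapped past 0xFF
#   'Ā'
#   >>> get_pairs(("h", "e", "l", "l", "o"))  # doctest: +SKIP
#   {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}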
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Optional[int] =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
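# Padding sketch for `global_attention_mask` (hypothetical 3-token input padded
# to length 5 with padding_side="right"): [1, 0, 0] becomes [1, 0, 0, -1, -1],
# where 1 marks global attention, 0 local attention, and -1 the padded positions.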
| 8 | 1 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures')
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[int] =mock.Mock()
lowercase : Dict =500
lowercase : Union[str, Any] ={}
lowercase : Optional[int] =HTTPError
lowercase : List[Any] ={}
# Download this model to make sure it's in the cache.
lowercase : str =WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase ) as mock_head:
lowercase : Any =WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This checks that we did call the fake head request
mock_head.assert_called()
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
lowercase : Optional[Any] =WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def A__ ( cls : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Any =TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def A__ ( cls : List[str] ) -> Dict:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def A__ ( self : Any ) -> Any:
'''simple docstring'''
lowercase : Tuple =WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
lowercase : List[Any] =WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCAmelCase , repo_id='''test-feature-extractor''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowercase : Union[str, Any] =WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def A__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[int] =WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
lowercase : int =WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowercase : str =WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
lowercase : List[str] =CustomFeatureExtractor.from_pretrained(UpperCAmelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
lowercase : Dict =AutoFeatureExtractor.from_pretrained(
f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , poly_a : Dict=None , poly_b : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
self.polyA = list(poly_a or [0] )[:]
self.polyB = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
self.len_A = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
self.len_B = len(self.polyB )
# Add 0 to make lengths equal a power of 2
self.c_max_length = int(
2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the Fourier transform
self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
self.product = self.__multiply()
def __dft( self , which : str ):
'''simple docstring'''
dft = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(dft ) <= 1:
return dft[0]
next_ncol = self.c_max_length // 2
while next_ncol > 0:
new_dft = [[] for i in range(next_ncol )]
root = self.root**next_ncol
# First half of next step
current_root = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
current_root = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
dft = new_dft
next_ncol = next_ncol // 2
return dft[0]
def __multiply( self ):
'''simple docstring'''
dft_a = self.__dft('''A''' )
dft_b = self.__dft('''B''' )
inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
next_ncol = 2
while next_ncol <= self.c_max_length:
new_inverse_c = [[] for i in range(next_ncol )]
root = self.root ** (next_ncol // 2)
current_root = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
inverce_c = new_inverse_c
next_ncol *= 2
# Unpack
inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
a = '''A = ''' + ''' + '''.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
b = '''B = ''' + ''' + '''.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
c = '''A*B = ''' + ''' + '''.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
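# Usage sketch for the radix-2 FFT multiplier above (the class keeps the
# placeholder name `UpperCAmelCase_` in this dump; attribute names follow the
# code):
#
#   >>> fft = UpperCAmelCase_([1, 2], [3, 4])  # doctest: +SKIP
#   >>> fft.product                            # (1 + 2x)(3 + 4x)
#
# yields the coefficients 3, 10 and 8 of 3 + 10x + 8x^2, returned as rounded
# complex numbers.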
| 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
SCREAMING_SNAKE_CASE = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
SCREAMING_SNAKE_CASE = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
SCREAMING_SNAKE_CASE = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
from .configuration_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecTextConfig,
Data2VecTextOnnxConfig,
)
from .configuration_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecVisionConfig,
Data2VecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_data2vec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecAudioForAudioFrameClassification,
Data2VecAudioForCTC,
Data2VecAudioForSequenceClassification,
Data2VecAudioForXVector,
Data2VecAudioModel,
Data2VecAudioPreTrainedModel,
)
from .modeling_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecTextForCausalLM,
Data2VecTextForMaskedLM,
Data2VecTextForMultipleChoice,
Data2VecTextForQuestionAnswering,
Data2VecTextForSequenceClassification,
Data2VecTextForTokenClassification,
Data2VecTextModel,
Data2VecTextPreTrainedModel,
)
from .modeling_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecVisionForImageClassification,
Data2VecVisionForMaskedImageModeling,
Data2VecVisionForSemanticSegmentation,
Data2VecVisionModel,
Data2VecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_data2vec_vision import (
TFData2VecVisionForImageClassification,
TFData2VecVisionForSemanticSegmentation,
TFData2VecVisionModel,
TFData2VecVisionPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
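# Lazy-import sketch: with `_LazyModule`, an import such as
# `from transformers.models.data2vec import Data2VecTextModel` only resolves the
# heavy torch-backed submodule on first attribute access (assumption: standard
# transformers package layout).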
| 8 |
'''simple docstring'''
def logical_left_shift ( number : int , shift_amount : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
binary_number = str(bin(number ) )
binary_number += "0" * shift_amount
return binary_number
def logical_right_shift ( number : int , shift_amount : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
binary_number = str(bin(number ) )[2:]
if shift_amount >= len(binary_number ):
return "0b0"
shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
return "0b" + shifted_binary_number
def arithmetic_right_shift ( number : int , shift_amount : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
binary_number_length = len(bin(number )[3:] ) # Find 2's complement of number
binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
binary_number = (
'''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
)
if shift_amount >= len(binary_number ):
return "0b" + binary_number[0] * len(binary_number )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(binary_number ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
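# Worked examples for the three shifts above (values follow from the
# definitions):
#
#   >>> logical_left_shift(5, 2)       # doctest: +SKIP
#   '0b10100'
#   >>> logical_right_shift(5, 2)      # doctest: +SKIP
#   '0b1'
#   >>> arithmetic_right_shift(-8, 1)  # doctest: +SKIP   sign bit is replicated
#   '0b11100'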
| 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def load_image ( _ : Union[str, Any] ) -> None:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =pipeline(
'''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Optional[Any] =INVOICE_URL
lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
lowercase : Dict ='''What is the placebo?'''
lowercase : Optional[Any] =[
{
'''image''': load_image(UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
self.assertEqual(
UpperCAmelCase , [
[
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : Tuple ='''How many cats are there?'''
lowercase : Optional[int] =[
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
# No text is detected in this image, meaning layoutlmv2 should fail.
# Empty answer probably
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
# We can optionally pass the words and bounding boxes directly
lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Dict =[]
lowercase : str =[]
lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase : Dict =INVOICE_URL
lowercase : str ='''What is the invoice number?'''
lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase : Dict =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : str ) -> Dict:
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : Tuple =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
lowercase : Tuple =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : Dict =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : List[Any] =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase : str =INVOICE_URL
lowercase : int ='''What is the invoice number?'''
lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase : Any =INVOICE_URL
lowercase : Union[str, Any] ='''What is the invoice number?'''
lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
pass
| 8 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
SCREAMING_SNAKE_CASE = {'allegro/herbert-base-cased': 514}
SCREAMING_SNAKE_CASE = {}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = HerbertTokenizer
def __init__( self : Dict , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : str="<pad>" , UpperCAmelCase : Optional[Any]="<mask>" , UpperCAmelCase : List[str]="</s>" , **UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sep_token=UpperCAmelCase , **UpperCAmelCase , )
def A__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : List[Any] =[self.cls_token_id]
lowercase : Any =[self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[Any] =[self.sep_token_id]
lowercase : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase : List[Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
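# Special-token layout produced by the methods above (hypothetical ids:
# cls=0, sep=2): a single sequence [5, 6] becomes [0, 5, 6, 2] and a pair
# ([5, 6], [7]) becomes [0, 5, 6, 2, 7, 2], with token_type_ids 0/0/0/0 and
# 0/0/0/0/1/1 respectively.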
| 8 |
'''simple docstring'''
def decimal_isolate ( number : float , digit_amount : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(number ) , digit_amount )
return number - int(number )
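# Behaviour note (illustrative): decimal_isolate(35.345, 1) returns
# round(35.345 - 35, 1) == 0.3, i.e. the fractional part rounded to one digit;
# with digit_amount == 0 the raw (unrounded) fractional part is returned.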
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication ( a : list , b : list ) -> list:
"""simple docstring"""
if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
new_matrix = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def matrix_addition ( matrix_a : list , matrix_b : list ) -> list:
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(matrix_a ) )
]
def matrix_subtraction ( matrix_a : list , matrix_b : list ) -> list:
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(matrix_a ) )
]
def split_matrix ( a : list ) -> tuple[list, list, list, list]:
"""simple docstring"""
if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
matrix_length = len(a )
mid = matrix_length // 2
top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
bot_right = [
[a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
]
top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
return top_left, top_right, bot_left, bot_right
def matrix_dimensions ( matrix : list ) -> tuple[int, int]:
"""simple docstring"""
return len(matrix ), len(matrix[0] )
def print_matrix ( matrix : list ) -> None:
"""simple docstring"""
print('''\n'''.join(str(line ) for line in matrix ) )
def actual_strassen ( matrix_a : list , matrix_b : list ) -> list:
"""simple docstring"""
if matrix_dimensions(matrix_a ) == (2, 2):
return default_matrix_multiplication(matrix_a , matrix_b )
a , b , c , d = split_matrix(matrix_a )
e , f , g , h = split_matrix(matrix_b )
t1 = actual_strassen(a , matrix_subtraction(f , h ) )
t2 = actual_strassen(matrix_addition(a , b ) , h )
t3 = actual_strassen(matrix_addition(c , d ) , e )
t4 = actual_strassen(d , matrix_subtraction(g , e ) )
t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
top_right = matrix_addition(t1 , t2 )
bot_left = matrix_addition(t3 , t4 )
bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t5 , t1 ) , t3 ) , t7 )
# construct the new matrix from our 4 quadrants
new_matrix = []
for i in range(len(top_right ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(top_right ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def strassen ( matrix1 : list , matrix2 : list ) -> list:
"""simple docstring"""
if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
msg = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F'Matrix A: {matrix1}\n'
F'Matrix B: {matrix2}'
)
raise Exception(msg )
dimension1 = matrix_dimensions(matrix1 )
dimension2 = matrix_dimensions(matrix2 )
if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
return [matrix1, matrix2]
maxim = max(*dimension1 , *dimension2 )
maxim = int(math.pow(2 , math.ceil(math.log2(maxim ) ) ) )
new_matrix1 = matrix1
new_matrix2 = matrix2
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , maxim ):
if i < dimension1[0]:
for _ in range(dimension1[1] , maxim ):
new_matrix1[i].append(0 )
else:
new_matrix1.append([0] * maxim )
if i < dimension2[0]:
for _ in range(dimension2[1] , maxim ):
new_matrix2[i].append(0 )
else:
new_matrix2.append([0] * maxim )
final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
# Removing the additional zeros
for i in range(0 , maxim ):
if i < dimension1[0]:
for _ in range(dimension2[1] , maxim ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrix1, matrix2))
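# Sanity check for the base case (exact integer arithmetic):
#
#   >>> default_matrix_multiplication([[1, 2], [3, 4]], [[5, 6], [7, 8]])  # doctest: +SKIP
#   [[19, 22], [43, 50]]
#
# Strassen replaces the 8 recursive block products of naive divide-and-conquer
# with 7, giving O(n^log2(7)) ≈ O(n^2.807) arithmetic operations.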
| 8 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def normalize_box ( box , width : int , height : int ):
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def apply_tesseract ( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ):
"""simple docstring"""
pil_image = to_pil_image(image )
image_width , image_height = pil_image.size
data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
actual_boxes = []
for x, y, w, h in zip(left , top , width , height ):
actual_box = [x, y, x + w, y + h]
actual_boxes.append(actual_box )
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box , image_width , image_height ) )
assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
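# Worked example for normalize_box (illustrative values): a box [10, 20, 30, 40]
# on a 200x100 image maps to the 0-1000 coordinate space as
#
#   >>> normalize_box([10, 20, 30, 40], 200, 100)  # doctest: +SKIP
#   [50, 200, 150, 400]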
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
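# Minimal usage sketch (assumes pytesseract is installed; the file name below is
# illustrative):
#   processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#   encoding = processor.preprocess(PIL.Image.open("invoice.png"), return_tensors="np")
#   encoding["pixel_values"], encoding["words"], encoding["boxes"]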
| 8 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
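# Usage sketch: `Tracker(model)(torch.randn(1, 3, 224, 224)).parametrized` runs a
# single forward pass and returns the leaf modules that actually hold weights, in
# execution order, which is what lets ModuleTransfer pair layers up below.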
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for vissl.
    """

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original (timm/vissl) model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])
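    # e.g. "regnet-y-040" -> "regnety_040": the timm name drops the dashes and
    # keeps a single underscore before the width suffix.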
    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to a pretrained timm model when no checkpoint is registered
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(name: str, from_model_func: Callable[[], nn.Module], our_model_func: Callable[[], nn.Module], config: RegNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True)
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True)
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()))
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()))
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()))
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))))
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()))
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()))
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()))
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))))
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
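# Example invocation (sketch; the script filename is illustrative):
#   python convert_regnet_to_pytorch.py --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted
# Caveat: argparse's `type=bool` turns every non-empty string (even "False") into
# True, so `--push_to_hub False` does not actually disable the push.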
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
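# The slice comparison above is the standard integration-test pattern: a small,
# deterministic corner of the output tensor is checked against values recorded
# from a reference run, with a loose atol to absorb framework/hardware noise.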
| 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
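# The _LazyModule registered above defers the heavy backend imports: attributes
# listed in _import_structure are only materialized on first access, and each
# try/except block keeps a missing backend (tokenizers/torch/TF/Flax) optional.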
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 8 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """simple docstring"""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """simple docstring"""
    return compare_versions(torch_version, operation, version)
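# Usage sketch: gate a feature on the installed torch build.
#   if is_torch_version(">=", "1.12.0"):
#       ...  # safe to rely on APIs introduced in 1.12
#   compare_versions("numpy", ">=", "1.20.0")  # same check for any installed package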
| 8 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 8 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 1 |
'''simple docstring'''
from manim import *
class Stage5(Scene):
    """simple docstring"""
    def construct(self):
'''simple docstring'''
lowercase : Tuple =Rectangle(height=0.5 , width=0.5 )
lowercase : str =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowercase : Any =Rectangle(height=0.2_5 , width=0.2_5 )
lowercase : Tuple =[mem.copy() for i in range(6 )]
lowercase : Tuple =[mem.copy() for i in range(6 )]
lowercase : Optional[int] =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : List[str] =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : Dict =VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : Any =Text('''CPU''' , font_size=24 )
lowercase : Tuple =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
lowercase : Tuple =[mem.copy() for i in range(4 )]
lowercase : Dict =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : List[Any] =Text('''GPU''' , font_size=24 )
lowercase : Union[str, Any] =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase )
lowercase : List[str] =[mem.copy() for i in range(6 )]
lowercase : str =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : Any =Text('''Model''' , font_size=24 )
lowercase : Dict =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase )
lowercase : List[str] =[]
lowercase : int =[]
for i, rect in enumerate(UpperCAmelCase ):
lowercase : List[Any] =fill.copy().set_fill(UpperCAmelCase , opacity=0.8 )
target.move_to(UpperCAmelCase )
model_arr.append(UpperCAmelCase )
lowercase : Union[str, Any] =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase )
self.add(*UpperCAmelCase , *UpperCAmelCase )
lowercase : List[Any] =[meta_mem.copy() for i in range(6 )]
lowercase : Any =[meta_mem.copy() for i in range(6 )]
lowercase : Optional[int] =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : str =VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : Optional[Any] =VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowercase : List[str] =Text('''Disk''' , font_size=24 )
lowercase : Optional[int] =Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
disk.move_to([-4, -1.2_5, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
lowercase : List[str] =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase : List[str] =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
lowercase : str =MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase )
lowercase : List[Any] =MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase ) )
lowercase : Optional[int] =Square(0.3 )
input.set_fill(UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase , buff=0.5 )
self.play(Write(UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase , buff=0.0_2 )
self.play(MoveToTarget(UpperCAmelCase ) )
self.play(FadeOut(UpperCAmelCase ) )
lowercase : str =Arrow(start=UpperCAmelCase , end=UpperCAmelCase , color=UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowercase : Optional[int] =MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) )
lowercase : int ={'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.0_2}
self.play(
Write(UpperCAmelCase ) , Circumscribe(model_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowercase : Union[str, Any] =a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
lowercase : Union[str, Any] =AnimationGroup(
FadeOut(UpperCAmelCase , run_time=0.5 ) , MoveToTarget(UpperCAmelCase , run_time=0.5 ) , FadeIn(UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowercase : str =0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowercase : Tuple =a_c
lowercase : Union[str, Any] =a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase ) , FadeOut(UpperCAmelCase , run_time=0.5 ) , )
lowercase : Tuple =MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) , MoveToTarget(UpperCAmelCase ) )
self.wait()
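# Illustrative sketch of the mechanism the scene above animates: each layer's weights
# live on the CPU and are copied to the GPU only while that layer runs, then moved
# back. This is a minimal stand-in using plain PyTorch hooks; `OffloadHook` and
# `attach_offload_hooks` are invented names for illustration, not the library's API.
import torch
from torch import nn

class OffloadHook:
    """Move a module to `device` just before its forward and back to CPU right after."""

    def __init__(self, device: torch.device) -> None:
        self.device = device

    def pre(self, module: nn.Module, args) -> None:
        module.to(self.device)  # weights arrive on the GPU only now

    def post(self, module: nn.Module, args, output):
        module.to("cpu")  # free the GPU again for the next layer
        return output

def attach_offload_hooks(model: nn.Module, device: torch.device) -> None:
    for layer in model.children():
        hook = OffloadHook(device)
        layer.register_forward_pre_hook(hook.pre)
        layer.register_forward_hook(hook.post)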
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
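# A stand-alone sketch of the lazy-import pattern the block above relies on: at import
# time only a name -> submodule map is built, and the first attribute access triggers
# the real import. `LazyModule` here is an illustrative miniature, not transformers'
# actual _LazyModule implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict) -> None:
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._class_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so each submodule is imported at most once
        return value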
| 8 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = 42
UpperCamelCase_ = None
@staticmethod
def A__ ( ) -> List[str]:
'''simple docstring'''
raise NotImplementedError
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , **UpperCAmelCase : int ) -> Any:
'''simple docstring'''
raise NotImplementedError
def A__ ( self : Tuple , UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
raise NotImplementedError
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
@classmethod
def A__ ( cls : Any ) -> Dict:
'''simple docstring'''
return f'`pip install {cls.pip_package or cls.name}`'
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''optuna'''
@staticmethod
def A__ ( ) -> List[Any]:
'''simple docstring'''
return is_optuna_available()
def A__ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : str , **UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
return run_hp_search_optuna(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
return default_hp_space_optuna(UpperCAmelCase )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''ray'''
UpperCamelCase_ = '''\'ray[tune]\''''
@staticmethod
def A__ ( ) -> List[Any]:
'''simple docstring'''
return is_ray_available()
def A__ ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : str , **UpperCAmelCase : str ) -> List[Any]:
'''simple docstring'''
return run_hp_search_ray(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_ray(UpperCAmelCase )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''sigopt'''
@staticmethod
def A__ ( ) -> Optional[int]:
'''simple docstring'''
return is_sigopt_available()
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
return run_hp_search_sigopt(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return default_hp_space_sigopt(UpperCAmelCase )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''wandb'''
@staticmethod
def A__ ( ) -> List[Any]:
'''simple docstring'''
return is_wandb_available()
def A__ ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
return run_hp_search_wandb(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : str , UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return default_hp_space_wandb(UpperCAmelCase )
SCREAMING_SNAKE_CASE = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowercase_ ( ) -> str:
"""simple docstring"""
lowercase : Optional[int] =[backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__A ) > 0:
lowercase : int =available_backends[0].name
if len(__A ) > 1:
logger.info(
F'{len(__A )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
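# A self-contained miniature of the registry pattern above: each backend declares a
# name, an availability probe and a pip hint, and the first installed backend wins.
# All names below are invented for illustration.
class _Backend:
    name = "base"
    pip_package = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    @classmethod
    def pip_install(cls) -> str:
        return f"`pip install {cls.pip_package or cls.name}`"

class _GridBackend(_Backend):
    name = "grid"

    @staticmethod
    def is_available() -> bool:
        return True  # stdlib-only, always usable

_REGISTRY = {b.name: b for b in [_GridBackend]}

def _default_backend() -> str:
    available = [b for b in _REGISTRY.values() if b.is_available()]
    if not available:
        raise RuntimeError("No search backend available.")
    return available[0].name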
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
pass
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : Any ) -> None:
'''simple docstring'''
lowercase : Any =data
lowercase : Node | None =None
def __iter__( self : Tuple ) -> int:
'''simple docstring'''
lowercase : List[Any] =self
lowercase : Dict =[]
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCAmelCase )
yield node.data
lowercase : Union[str, Any] =node.next_node
@property
def A__ ( self : Tuple ) -> bool:
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = Node(1)
SCREAMING_SNAKE_CASE = Node(2)
SCREAMING_SNAKE_CASE = Node(3)
SCREAMING_SNAKE_CASE = Node(4)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE = root_node.next_node
print(root_node.has_loop) # True
SCREAMING_SNAKE_CASE = Node(5)
SCREAMING_SNAKE_CASE = Node(6)
SCREAMING_SNAKE_CASE = Node(5)
SCREAMING_SNAKE_CASE = Node(6)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE = Node(1)
print(root_node.has_loop) # False
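# The `has_loop` property above tracks visited nodes in a list, costing O(n) extra
# memory and O(n^2) membership checks. Floyd's tortoise-and-hare finds the same loops
# in O(1) space; a sketch against the same Node shape (assuming only `next_node` links):
def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:
            return True                  # the pointers can only meet inside a cycle
    return False                         # fast fell off the end, so the list is finite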
| 8 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}
SCREAMING_SNAKE_CASE = {'allegro/herbert-base-cased': 514}
SCREAMING_SNAKE_CASE = {}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = HerbertTokenizer
def __init__( self : Dict , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : str="<pad>" , UpperCAmelCase : Optional[Any]="<mask>" , UpperCAmelCase : List[str]="</s>" , **UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sep_token=UpperCAmelCase , **UpperCAmelCase , )
def A__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : List[Any] =[self.cls_token_id]
lowercase : Any =[self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[Any] =[self.sep_token_id]
lowercase : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase : List[Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
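# What the three helpers above produce for HerBERT's BERT-style layout, spelled out
# with toy ids (cls=0, sep=1 are placeholders; the real ids come from the vocab):
#   single sequence: [CLS] A [SEP]          -> token type ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> type ids 0 for the first segment, 1 after
cls_id, sep_id = 0, 1
a, b = [10, 11], [20, 21, 22]
ids = [cls_id] + a + [sep_id] + b + [sep_id]
type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
special_mask = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]
assert len(ids) == len(type_ids) == len(special_mask)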
| 8 | 1 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : Tuple =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Optional[Any] ='''1'''
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : str =self.get_env()
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Any ='''1'''
lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] ='''
from transformers import pipeline
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase : Tuple =self.get_env()
lowercase : Optional[int] ='''1'''
lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] ='''
from transformers import AutoModel
'''
lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : List[str] ='''1'''
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
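# The offline simulation used throughout this class, in isolation: a child interpreter
# gets socket.socket replaced before any HTTP client can bind it, so every network call
# fails deterministically while cached files keep working. A sketch of the recipe (the
# variable names here are illustrative):
mock_offline = """
import socket
def offline_socket(*args, **kwargs):
    raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""
# A test then composes and runs:
#   cmd = [sys.executable, "-c", "\n".join([imports, mock_offline, body])]
#   result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# and asserts on result.returncode / result.stdout, optionally setting
# env["TRANSFORMERS_OFFLINE"] = "1" so from_pretrained falls back to the local cache.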
| 8 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
    'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
    'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
    'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
    'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
    'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
    'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
    'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
    'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
    'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
    'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''xlm'''
UpperCamelCase_ = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self : List[Any] , UpperCAmelCase : Tuple=3_0145 , UpperCAmelCase : Dict=2048 , UpperCAmelCase : Optional[Any]=12 , UpperCAmelCase : str=16 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : List[str]=True , UpperCAmelCase : int=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : List[Any]=False , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=512 , UpperCAmelCase : Optional[int]=2048**-0.5 , UpperCAmelCase : Any=1e-12 , UpperCAmelCase : str=0.0_2 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : int=5 , UpperCAmelCase : Any=True , UpperCAmelCase : Tuple="first" , UpperCAmelCase : Any=True , UpperCAmelCase : List[str]=None , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[Any]=5 , UpperCAmelCase : str=5 , UpperCAmelCase : int=0 , UpperCAmelCase : int=0 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Tuple=0 , **UpperCAmelCase : Tuple , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =vocab_size
lowercase : int =emb_dim
lowercase : str =n_layers
lowercase : str =n_heads
lowercase : Optional[int] =dropout
lowercase : int =attention_dropout
lowercase : int =gelu_activation
lowercase : List[Any] =sinusoidal_embeddings
lowercase : Any =causal
lowercase : Any =asm
lowercase : Optional[Any] =n_langs
lowercase : Union[str, Any] =use_lang_emb
lowercase : Dict =layer_norm_eps
lowercase : Any =bos_index
lowercase : Any =eos_index
lowercase : Optional[Any] =pad_index
lowercase : str =unk_index
lowercase : Optional[int] =mask_index
lowercase : List[Any] =is_encoder
lowercase : str =max_position_embeddings
lowercase : List[Any] =embed_init_std
lowercase : str =init_std
lowercase : Optional[Any] =summary_type
lowercase : Optional[int] =summary_use_proj
lowercase : Any =summary_activation
lowercase : Optional[Any] =summary_proj_to_labels
lowercase : List[Any] =summary_first_dropout
lowercase : int =start_n_top
lowercase : List[Any] =end_n_top
lowercase : Any =mask_token_id
lowercase : Optional[Any] =lang_id
if "n_words" in kwargs:
lowercase : Dict =kwargs['''n_words''']
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , **UpperCAmelCase )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@property
def A__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
lowercase : Union[str, Any] ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Optional[Any] ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
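# How the `inputs` mapping above is consumed, sketched in comments (the export call is
# an assumption about typical usage, not taken from this file): the axis labels become
# torch.onnx.export's dynamic_axes, so the exported graph accepts any batch size and
# sequence length.
#
#   dynamic_axes = dict(onnx_config.inputs)
#   torch.onnx.export(model, (input_ids, attention_mask, token_type_ids), "xlm.onnx",
#                     input_names=list(dynamic_axes), dynamic_axes=dynamic_axes)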
| 8 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowercase_ ( __A : str ) -> int:
"""simple docstring"""
lowercase : int ={'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
lowercase : Stack[int] =Stack()
lowercase : Stack[str] =Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__A ) )
elif i in operators:
# RULE 2
operator_stack.push(__A )
elif i == ")":
# RULE 4
lowercase : Optional[Any] =operator_stack.peek()
operator_stack.pop()
lowercase : Optional[Any] =operand_stack.peek()
operand_stack.pop()
lowercase : Optional[Any] =operand_stack.peek()
operand_stack.pop()
lowercase : List[str] =operators[opr](__A , __A )
operand_stack.push(__A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Optional[int]=30 , UpperCAmelCase : Any=400 , UpperCAmelCase : str=True , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Dict=True , UpperCAmelCase : List[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase : Any=[0.5, 0.5, 0.5] , UpperCAmelCase : str=True , UpperCAmelCase : Optional[Any]=1 / 255 , UpperCAmelCase : int=True , ) -> str:
'''simple docstring'''
lowercase : Optional[Any] =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowercase : List[str] =parent
lowercase : Union[str, Any] =batch_size
lowercase : List[Any] =num_channels
lowercase : List[str] =min_resolution
lowercase : Union[str, Any] =max_resolution
lowercase : Union[str, Any] =do_resize
lowercase : Optional[Any] =size
lowercase : List[Any] =do_normalize
lowercase : Tuple =image_mean
lowercase : Any =image_std
lowercase : Any =do_rescale
lowercase : str =rescale_factor
lowercase : Union[str, Any] =do_pad
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Tuple=False ) -> Tuple:
'''simple docstring'''
if not batched:
lowercase : str =image_inputs[0]
if isinstance(UpperCAmelCase , Image.Image ):
lowercase , lowercase : Dict =image.size
else:
lowercase , lowercase : int =image.shape[1], image.shape[2]
if w < h:
lowercase : List[Any] =int(self.size['''shortest_edge'''] * h / w )
lowercase : List[Any] =self.size['''shortest_edge''']
elif w > h:
lowercase : str =self.size['''shortest_edge''']
lowercase : str =int(self.size['''shortest_edge'''] * w / h )
else:
lowercase : Union[str, Any] =self.size['''shortest_edge''']
lowercase : List[str] =self.size['''shortest_edge''']
else:
lowercase : Optional[Any] =[]
for image in image_inputs:
lowercase , lowercase : int =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase : Union[str, Any] =max(UpperCAmelCase , key=lambda UpperCAmelCase : item[0] )[0]
lowercase : Dict =max(UpperCAmelCase , key=lambda UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = YolosImageProcessor if is_vision_available() else None
def A__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
lowercase : Dict =YolosImageProcessingTester(self )
@property
def A__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''size''' ) )
def A__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowercase : int =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase )
lowercase : List[Any] =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
lowercase : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowercase : int =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Union[str, Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase : Any =self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
lowercase : int =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
lowercase : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
lowercase : int =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : str =self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase : int =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : int =self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self : Any ) -> int:
'''simple docstring'''
lowercase : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
lowercase : List[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Optional[int] =self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase : List[Any] =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
lowercase , lowercase : Union[str, Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
lowercase : Tuple =self.image_processing_class(do_resize=UpperCAmelCase , do_normalize=UpperCAmelCase , do_rescale=UpperCAmelCase )
# create random PyTorch tensors
lowercase : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowercase : Any =image_processing_a.pad(UpperCAmelCase , return_tensors='''pt''' )
lowercase : Any =image_processing_a(UpperCAmelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def A__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[int] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase : Union[str, Any] =json.loads(f.read() )
lowercase : List[Any] ={'''image_id''': 3_9769, '''annotations''': target}
# encode them
lowercase : Any =YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowercase : Optional[int] =image_processing(images=UpperCAmelCase , annotations=UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
lowercase : Optional[int] =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase )
lowercase : Union[str, Any] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase , atol=1e-4 ) )
# verify area
lowercase : Optional[Any] =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase ) )
# verify boxes
lowercase : Any =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase )
lowercase : Optional[Any] =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase , atol=1e-3 ) )
# verify image_id
lowercase : str =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase ) )
# verify is_crowd
lowercase : Dict =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase ) )
# verify class_labels
lowercase : Optional[int] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase ) )
# verify orig_size
lowercase : str =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase ) )
# verify size
lowercase : Optional[Any] =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase ) )
@slow
def A__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowercase : int =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase : Dict =json.loads(f.read() )
lowercase : Any ={'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
lowercase : Optional[Any] =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase : Optional[Any] =YolosImageProcessor(format='''coco_panoptic''' )
lowercase : Optional[int] =image_processing(images=UpperCAmelCase , annotations=UpperCAmelCase , masks_path=UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
lowercase : Optional[int] =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase )
lowercase : Optional[int] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase , atol=1e-4 ) )
# verify area
lowercase : Dict =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase ) )
# verify boxes
lowercase : Optional[int] =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase )
lowercase : Tuple =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase , atol=1e-3 ) )
# verify image_id
lowercase : List[str] =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase ) )
# verify is_crowd
lowercase : Any =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase ) )
# verify class_labels
lowercase : Dict =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase ) )
# verify masks
lowercase : Optional[int] =82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase )
# verify orig_size
lowercase : List[Any] =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase ) )
# verify size
lowercase : Union[str, Any] =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase ) )
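# The expected-size helper above encodes the aspect-preserving resize rule the
# processor is tested against: scale the image so its shorter side equals
# `shortest_edge`. The same rule in isolation (illustrative; the default mirrors the
# tester's shortest_edge of 18):
def expected_size(h: int, w: int, shortest_edge: int = 18) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # portrait: width is the short side
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)  # landscape: height is the short side
    return shortest_edge, shortest_edge                   # square image

assert expected_size(400, 200) == (36, 18)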
| 8 |
'''simple docstring'''
import re
def lowercase_ ( __A : str ) -> bool:
"""simple docstring"""
lowercase : Any =re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(__A , __A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
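# The same pattern rewritten with re.VERBOSE so each piece is visible; behaviour should
# match the compact form above, which remains the one actually exercised:
INDIAN_PHONE = re.compile(
    r"""
    ^(\+91[\-\s]?)?   # optional +91 country code, optionally followed by - or a space
    [0]?              # optional leading zero
    (91)?             # optional bare 91 prefix
    [789]\d{9}$       # ten digits starting with 7, 8 or 9
    """,
    re.VERBOSE,
)
assert INDIAN_PHONE.match('+918827897895')
assert not INDIAN_PHONE.match('+9112345')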
| 8 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}
SCREAMING_SNAKE_CASE = {
'camembert-base': 512,
}
SCREAMING_SNAKE_CASE = '▁'
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ = CamembertTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]="<s>" , UpperCAmelCase : Tuple="</s>" , UpperCAmelCase : Dict="</s>" , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : Union[str, Any]="<unk>" , UpperCAmelCase : Union[str, Any]="<pad>" , UpperCAmelCase : List[Any]="<mask>" , UpperCAmelCase : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCAmelCase : Any , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
lowercase : Optional[int] =vocab_file
lowercase : Any =False if not self.vocab_file else True
def A__ ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : Dict =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Tuple =[self.sep_token_id]
lowercase : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
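# The pair layout built above differs from BERT's: CamemBERT (RoBERTa-style) doubles
# the separator and uses a single all-zero segment, since it has no segment embeddings.
# With toy ids (cls=0, sep=2 are placeholders for the real vocab ids):
cls_id, sep_id = 0, 2
a, b = [10, 11], [20, 21]
pair_ids = [cls_id] + a + [sep_id, sep_id] + b + [sep_id]   # <s> A </s></s> B </s>
type_ids = [0] * len(pair_ids)                              # one zero-filled segment
assert pair_ids == [0, 10, 11, 2, 2, 20, 21, 2]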
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
    def test_basic( self ):
        '''simple docstring'''
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb1 = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb1 , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self ):
        '''simple docstring'''
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings( self ):
        '''simple docstring'''
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , desired_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , desired_key_layer , atol=self.tolerance )
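# The checks above pin down RoFormer's rotary position embeddings against
# hard-coded values. Below is a minimal standalone numpy sketch of the
# operation being tested (the "rotate-half" formulation). `rotate_half` and
# `apply_rotary` are illustrative names, not the library API, and this is an
# assumed simplification of the exact interleaving the real layer uses.
import numpy as np
def rotate_half(x ):
    # Split the last dimension in two and swap the halves with a sign flip.
    x1 , x2 = np.split(x , 2 , axis=-1 )
    return np.concatenate([-x2, x1] , axis=-1 )
def apply_rotary(x , sin , cos ):
    # Rotate each feature pair by the position-dependent angle encoded in sin/cos.
    return x * cos + rotate_half(x ) * sin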
| 8 | 1 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase_ ( FeatureExtractionSavingTestMixin ):
    """simple docstring"""
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict( self ):
        '''simple docstring'''
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feat_extract , '''feature_size''' ) )
        self.assertTrue(hasattr(feat_extract , '''sampling_rate''' ) )
        self.assertTrue(hasattr(feat_extract , '''padding_value''' ) )
    def test_batch_feature( self ):
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
    def test_batch_feature_pt( self ):
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
    def test_batch_feature_tf( self ):
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding( self , numpify=False ):
        '''simple docstring'''
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 , input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 , input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) , np.asarray(input_slice_2 ) , atol=1e-3 ):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features , padding=False )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features , padding='''longest''' )
        input_2 = input_2[input_name]
        input_3 = feat_extract.pad(processed_features , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(processed_features , padding='''longest''' , return_tensors='''np''' )
        input_4 = input_4[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , padding='''max_length''' )[input_name]
        input_5 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=pad_max_length , return_tensors='''np''' )
        input_5 = input_5[input_name]
        self.assertFalse(_inputs_have_equal_length(input_1 ) )
        self.assertTrue(_inputs_have_equal_length(input_2 ) )
        self.assertTrue(_inputs_have_equal_length(input_3 ) )
        self.assertTrue(_inputs_are_equal(input_2 , input_3 ) )
        self.assertTrue(len(input_1[0] ) == pad_min_length )
        self.assertTrue(len(input_1[1] ) == pad_min_length + pad_diff )
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_2[0] )) )
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length) )
        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size )
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features , pad_to_multiple_of=10 )
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(processed_features , padding='''longest''' , pad_to_multiple_of=10 )
        input_7 = input_7[input_name]
        input_8 = feat_extract.pad(
            processed_features , padding='''max_length''' , pad_to_multiple_of=10 , max_length=pad_max_length )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features , padding='''max_length''' , pad_to_multiple_of=10 , max_length=pad_max_length , return_tensors='''np''' , )
        input_9 = input_9[input_name]
        self.assertTrue(all(len(x ) % 10 == 0 for x in input_6 ) )
        self.assertTrue(_inputs_are_equal(input_6 , input_7 ) )
        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x ) == expected_mult_pad_length for x in input_7 ) )
        self.assertEqual(input_9.shape[:2] , (batch_size, expected_mult_pad_length) )
        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size )
        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_5[1] )[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_5[2] )[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
            < 1e-3 )
    def _check_truncation( self , numpify=False ):
        '''simple docstring'''
        def _inputs_have_equal_length(input ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 , input_2 ):
            if len(input_1 ) != len(input_2 ):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 , input_2 ):
                if not np.allclose(np.asarray(input_slice_1 ) , np.asarray(input_slice_2 ) , atol=1e-3 ):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=True )
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1 ) )
        self.assertFalse(_inputs_have_equal_length(input_2 ) )
        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=True , )
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
        input_4 = input_4[input_name]
        self.assertTrue(_inputs_have_equal_length(input_3 ) )
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4 ) )
        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=True , return_tensors='''np''' , )
        input_5 = input_5[input_name]
        input_6 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=True )
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
        input_7 = input_7[input_name]
        self.assertTrue(input_5.shape[1] == len(speech_inputs[1] ) )
        self.assertTrue(_inputs_have_equal_length(input_5 ) )
        self.assertTrue(_inputs_have_equal_length(input_6 ) )
        self.assertTrue(_inputs_are_equal(input_5 , input_6 ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7 ) )
        self.assertTrue(len(input_7[-1] ) == len(speech_inputs[-1] ) )
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , truncation=True )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , padding='''longest''' , truncation=True )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , padding='''longest''' , truncation=True )[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError ):
            feat_extract.pad(processed_features , padding='''max_length''' , truncation=True )[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=pad_to_multiple_of , truncation=True , )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=pad_to_multiple_of , )
        input_9 = input_9[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0] )
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_8[0] ) == expected_length )
        self.assertTrue(_inputs_have_equal_length(input_8 ) )
        self.assertFalse(_inputs_have_equal_length(input_9 ) )
    def test_padding_from_list( self ):
        '''simple docstring'''
        self._check_padding(numpify=False )
    def test_padding_from_array( self ):
        '''simple docstring'''
        self._check_padding(numpify=True )
    def test_truncation_from_list( self ):
        '''simple docstring'''
        self._check_truncation(numpify=False )
    def test_truncation_from_array( self ):
        '''simple docstring'''
        self._check_truncation(numpify=True )
@require_torch
    def test_padding_accepts_tensors_pt( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features , padding='''longest''' , return_tensors='''np''' )[input_name]
        input_pt = feat_extract.pad(processed_features , padding='''longest''' , return_tensors='''pt''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
    def test_padding_accepts_tensors_tf( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features , padding='''longest''' , return_tensors='''np''' )[input_name]
        input_tf = feat_extract.pad(processed_features , padding='''longest''' , return_tensors='''tf''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
    def test_attention_mask( self ):
        '''simple docstring'''
        feat_dict = self.feat_extract_dict
        feat_dict['''return_attention_mask'''] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        processed = feat_extract.pad(processed , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )
    def test_attention_mask_with_truncation( self ):
        '''simple docstring'''
        feat_dict = self.feat_extract_dict
        feat_dict['''return_attention_mask'''] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        processed_pad = feat_extract.pad(
            processed , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
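# A minimal numpy sketch of the behaviour the padding tests above exercise:
# right-pad every sequence to the batch maximum and mark real frames with 1s in
# an attention mask. `pad_longest` is an illustrative helper, not the library API.
def pad_longest(sequences , padding_value=0.0 ):
    max_len = max(len(seq ) for seq in sequences )
    batch = np.full((len(sequences ), max_len) , padding_value , dtype=np.float32 )
    mask = np.zeros((len(sequences ), max_len) , dtype=np.int32 )
    for i, seq in enumerate(sequences ):
        batch[i, : len(seq )] = seq
        mask[i, : len(seq )] = 1
    return batch, mask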
| 8 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( ProcessorMixin ):
    """simple docstring"""
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''LayoutLMv2ImageProcessor'''
    tokenizer_class = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''image'''] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        '''simple docstring'''
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> List[str]:
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
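# Hypothetical end-to-end usage of a processor like the one above. The model id
# and file name are placeholders (not verified), and running this needs
# transformers, Pillow and an OCR backend for apply_ocr=True:
#
#   from transformers import LayoutXLMProcessor
#   from PIL import Image
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   print(encoding.keys())  # input_ids, bbox, attention_mask, image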
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector : list[float] ) -> np.ndarray:
    """simple docstring"""
    return np.maximum(0 , vector )
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
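# A companion sketch: the ReLU derivative used in backpropagation (1 where the
# input is positive, 0 elsewhere). `relu_derivative` is an illustrative
# addition, not part of the original snippet.
def relu_derivative(vector ) -> np.ndarray:
    return (np.asarray(vector ) > 0).astype(float )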
| 8 |
'''simple docstring'''
def solution(n : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
    """simple docstring"""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number )
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def partition(number_to_partition : int ) -> set[int]:
    """simple docstring"""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret : set[int] = set()
    prime : int
    sub : int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def solution(number_unique_partitions : int = 5_0_0_0 ) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 8 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u : float , p : int ) -> float:
    """simple docstring"""
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main( ) -> None:
    """simple docstring"""
    n = int(input('''enter the numbers of values: ''' ) )
    y : list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
    main()
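# A non-interactive sketch of the same Newton forward-difference formula on
# fixed data (the sample table below is an assumed example, not part of the
# original script): interpolating f(x) = x**2 at x = 2.5 from the nodes
# x = 1, 2, 3, 4 yields 6.25.
def demo( ) -> float:
    x = [1.0, 2.0, 3.0, 4.0]
    y = [[1.0, 0.0, 0.0, 0.0], [4.0, 0.0, 0.0, 0.0], [9.0, 0.0, 0.0, 0.0], [16.0, 0.0, 0.0, 0.0]]
    n = len(x )
    u = (2.5 - x[0]) / (x[1] - x[0])
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    return summ  # 6.25 for this quadratic table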
| 8 | 1 |
'''simple docstring'''
def odd_even_sort(input_list : list ) -> list:
    """simple docstring"""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
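# A quick, non-interactive check of the routine above (illustrative only):
def _demo_odd_even_sort( ) -> None:
    assert odd_even_sort([5, 3, 1, 4, 2] ) == [1, 2, 3, 4, 5]
    assert odd_even_sort([] ) == []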
| 8 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
        image_processor = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(image_processor , CLIPImageProcessor )
def A__ ( self : Any ) -> int:
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config , CLIPImageProcessor )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()
            config_dict.pop('''image_processor_type''' )
            config = CLIPImageProcessor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''clip-base is not a local folder and is not a valid model identifier''' ):
            image_processor = AutoImageProcessor.from_pretrained('''clip-base''' )
    def A__ ( self : List[Any] ) -> List[str]:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
    def A__ ( self : Tuple ) -> Optional[int]:
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(processor_tmpfile , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(config_tmpfile , '''w''' ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir )
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_image_processor , CustomImageProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
        class NewImageProcessor( CLIPImageProcessor ):
            """simple docstring"""
            is_local = True
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(image_processor , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
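# A minimal standalone sketch of the register-then-load pattern exercised above.
# Class names are illustrative, and the BaseImageProcessor import path is an
# assumption for recent transformers versions:
#
#   from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
#   from transformers.image_processing_utils import BaseImageProcessor
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   class MyImageProcessor(BaseImageProcessor):
#       pass
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)
#   # AutoImageProcessor.from_pretrained(<my-model checkpoint>) now resolves to MyImageProcessor.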
| 8 | 1 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator : Accelerator , batch_size : int = 1_6 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    """simple docstring"""
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:' , eval_metric )
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
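# Standalone illustration of `find_executable_batch_size` as used above: the
# decorator halves the batch size and re-invokes the wrapped function whenever
# an out-of-memory error escapes it. The failing threshold below is artificial,
# purely to demonstrate the retry behaviour.
@find_executable_batch_size(starting_batch_size=128 )
def _probe(batch_size ):
    if batch_size > 32:
        raise RuntimeError('''CUDA out of memory.''' )
    return batch_size
# _probe() is called with no arguments; the decorator injects `batch_size`,
# retrying at 128 -> 64 -> 32 and returning 32.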
| 8 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9  # units = N * m^2 * C^-2
def coulombs_law(force : float , charge1 : float , charge2 : float , distance : float ) -> dict[str, float]:
    """simple docstring"""
    charge_product = abs(charge1 * charge2 )
    if (force, charge1, charge2, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge2 )
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge1 )
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
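# Worked example for the solver above: pass 0 for the unknown quantity.
# With charges of 3 C and 5 C separated by 2000 m:
#   coulombs_law(force=0, charge1=3, charge2=5, distance=2000)
#   -> {'force': 33705.0}   (8.988e9 * 15 / 2000**2)
assert coulombs_law(0 , 3 , 5 , 2000 ) == {"force": 33705.0}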
| 8 | 1 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self ):
        '''simple docstring'''
        distributed_args = f'\n            {self.test_dir}/xla_spawn.py\n            --num_cores 8\n            {self.test_file_path}\n        '.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 8 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
    """simple docstring"""
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase_ ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
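# Usage sketch (an assumption, not part of the original file): this fragment mirrors the
# LED/Longformer-style tokenizer, whose `_pad` override above pads `global_attention_mask`
# with -1 so that padded positions stay in local attention. With the original identifiers
# restored, it would be exercised roughly as:
#   enc = tokenizer("hello world")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   batch = tokenizer.pad(enc, padding="max_length", max_length=16)
#   # batch["global_attention_mask"] is now right-padded with -1 up to length 16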
| 8 | 1 |
'''simple docstring'''
def lowercase_ ( __A : float , __A : int ) -> float:
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A ) , __A )
return number - int(__A )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =list(poly_a or [0] )[:]
lowercase : Dict =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : int =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : List[str] =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : Tuple =int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : str =self.__multiply()
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase ) <= 1:
return dft[0]
#
lowercase : List[Any] =self.c_max_length // 2
while next_ncol > 0:
lowercase : str =[[] for i in range(UpperCAmelCase )]
lowercase : List[str] =self.root**next_ncol
# First half of next step
lowercase : Union[str, Any] =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : Any =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Tuple =new_dft
lowercase : List[Any] =next_ncol // 2
return dft[0]
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.__dft('''A''' )
lowercase : Union[str, Any] =self.__dft('''B''' )
lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Tuple =2
while next_ncol <= self.c_max_length:
lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
lowercase : Tuple =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
        lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        lowercase : List[str] ='''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
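# Usage sketch (hypothetical: the dataset's renaming pass calls the class `UpperCAmelCase_`
# and breaks many local names, so this shows the intended use of the original module rather
# than directly runnable code):
#   fft = UpperCAmelCase_(poly_a=[1, 2, 3], poly_b=[4, 5, 6])
#   print(fft)  # A*B = 4*x^0 + 13*x^1 + 28*x^2 + 27*x^3 + 18*x^4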
| 8 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =pipeline(
'''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Optional[Any] =INVOICE_URL
lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
lowercase : Dict ='''What is the placebo?'''
lowercase : Optional[Any] =[
{
'''image''': load_image(UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
self.assertEqual(
UpperCAmelCase , [
[
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : Tuple ='''How many cats are there?'''
lowercase : Optional[int] =[
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        # No text is detected in this image, so layoutlmv2 should fail
        # and likely return an empty answer.
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Dict =[]
lowercase : str =[]
lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase : Dict =INVOICE_URL
lowercase : str ='''What is the invoice number?'''
lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase : Dict =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : str ) -> Dict:
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : Tuple =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
lowercase : Tuple =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : Dict =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : List[Any] =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase : str =INVOICE_URL
lowercase : int ='''What is the invoice number?'''
lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase : Any =INVOICE_URL
lowercase : Union[str, Any] ='''What is the invoice number?'''
lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
pass
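# Usage sketch (an assumption drawn from the expectations above, not part of the test file):
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)
#   # -> [{"score": ..., "answer": "us-001", "start": 16, "end": 16}]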
| 8 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : List[Any] =str(bin(__A ) )
binary_number += "0" * shift_amount
return binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : Union[str, Any] =str(bin(__A ) )[2:]
if shift_amount >= len(__A ):
return "0b0"
lowercase : Any =binary_number[: len(__A ) - shift_amount]
return "0b" + shifted_binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
lowercase : str ='''0''' + str(bin(__A ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase : Dict =len(bin(__A )[3:] ) # Find 2's complement of number
lowercase : Optional[Any] =bin(abs(__A ) - (1 << binary_number_length) )[3:]
lowercase : int =(
'''1''' + '''0''' * (binary_number_length - len(__A )) + binary_number
)
if shift_amount >= len(__A ):
return "0b" + binary_number[0] * len(__A )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__A ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
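# Demo of the surviving definition (the dataset renames all three shift helpers to the same
# name `lowercase_`, so only the last one -- the arithmetic right shift -- is callable):
#   lowercase_(8, 1)   # '0b00100'  (sign bit 0 is duplicated, remaining bits shift right)
#   lowercase_(-8, 1)  # '0b11100'  (sign bit 1 is duplicated: two's-complement behaviour)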
| 8 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self : Tuple , UpperCAmelCase : int = 1 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : int = 50 , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , **UpperCAmelCase : Tuple , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
lowercase : List[Any] =torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCAmelCase , )
lowercase : Optional[int] =image.to(self.device )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : str =self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase : List[Any] =self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
lowercase : List[str] =(image / 2 + 0.5).clamp(0 , 1 )
lowercase : Any =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : Tuple =self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=UpperCAmelCase ), "This is a local test"
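# Usage sketch (an assumption: the class and parameter names above were rewritten by the
# dataset, and the model id below is only illustrative). The pipeline is a plain
# unconditional denoising loop, so with real components it would run roughly as:
#   from diffusers import DDIMScheduler, UNet2DModel
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = UpperCAmelCase_(unet=unet, scheduler=DDIMScheduler())
#   output, note = pipe(batch_size=1, num_inference_steps=50)
#   # note == "This is a local test"; output.images holds the generated PIL images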
| 8 |
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowercase_ ( __A : str , __A : str ) -> str | Literal[False]:
"""simple docstring"""
lowercase : str =list(__A )
lowercase : Optional[Any] =list(__A )
lowercase : str =0
for i in range(len(__A ) ):
if lista[i] != lista[i]:
count += 1
lowercase : Union[str, Any] ='''_'''
if count > 1:
return False
else:
return "".join(__A )
def lowercase_ ( __A : list[str] ) -> list[str]:
"""simple docstring"""
lowercase : int =[]
while True:
lowercase : Optional[int] =['''$'''] * len(__A )
lowercase : int =[]
for i in range(len(__A ) ):
for j in range(i + 1 , len(__A ) ):
lowercase : str =compare_string(binary[i] , binary[j] )
if k is False:
lowercase : List[str] ='''*'''
lowercase : Any ='''*'''
temp.append('''X''' )
for i in range(len(__A ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__A ) == 0:
return pi
lowercase : Union[str, Any] =list(set(__A ) )
def lowercase_ ( __A : int , __A : Sequence[float] ) -> list[str]:
"""simple docstring"""
lowercase : str =[]
for minterm in minterms:
lowercase : Dict =''''''
for _ in range(__A ):
lowercase : Union[str, Any] =str(minterm % 2 ) + string
minterm //= 2
temp.append(__A )
return temp
def lowercase_ ( __A : str , __A : str , __A : int ) -> bool:
"""simple docstring"""
lowercase : Dict =list(__A )
lowercase : Tuple =list(__A )
lowercase : int =0
for i in range(len(__A ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowercase_ ( __A : list[list[int]] , __A : list[str] ) -> list[str]:
"""simple docstring"""
lowercase : Optional[Any] =[]
lowercase : List[str] =[0] * len(__A )
for i in range(len(chart[0] ) ):
lowercase : int =0
lowercase : List[Any] =-1
for j in range(len(__A ) ):
if chart[j][i] == 1:
count += 1
lowercase : List[Any] =j
if count == 1:
lowercase : int =1
for i in range(len(__A ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__A ) ):
lowercase : Any =0
temp.append(prime_implicants[i] )
while True:
lowercase : Any =0
lowercase : Any =-1
lowercase : List[str] =0
for i in range(len(__A ) ):
lowercase : List[Any] =chart[i].count(1 )
if count_n > max_n:
lowercase : Dict =count_n
lowercase : Union[str, Any] =i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__A ) ):
lowercase : int =0
def lowercase_ ( __A : list[str] , __A : list[str] ) -> list[list[int]]:
"""simple docstring"""
lowercase : List[Any] =[[0 for x in range(len(__A ) )] for x in range(len(__A ) )]
for i in range(len(__A ) ):
lowercase : Union[str, Any] =prime_implicants[i].count('''_''' )
for j in range(len(__A ) ):
if is_for_table(prime_implicants[i] , binary[j] , __A ):
lowercase : Union[str, Any] =1
return chart
def lowercase_ ( ) -> None:
"""simple docstring"""
lowercase : Union[str, Any] =int(input('''Enter the no. of variables\n''' ) )
lowercase : Dict =[
float(__A )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
lowercase : int =decimal_to_binary(__A , __A )
lowercase : Tuple =check(__A )
print('''Prime Implicants are:''' )
print(__A )
lowercase : Optional[Any] =prime_implicant_chart(__A , __A )
lowercase : Tuple =selection(__A , __A )
print('''Essential Prime Implicants are:''' )
print(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 8 |
| 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 8 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : List[Any] , __A : int , __A : int ) -> Optional[int]:
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def lowercase_ ( __A : np.ndarray , __A : Optional[str] , __A : Optional[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase : int =to_pil_image(__A )
lowercase , lowercase : Tuple =pil_image.size
lowercase : Optional[Any] =pytesseract.image_to_data(__A , lang=__A , output_type='''dict''' , config=__A )
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase : Dict =[idx for idx, word in enumerate(__A ) if not word.strip()]
lowercase : str =[word for idx, word in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : Optional[int] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : List[Any] =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : str =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
lowercase : int =[coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase : Tuple =[]
for x, y, w, h in zip(__A , __A , __A , __A ):
lowercase : str =[x, y, x + w, y + h]
actual_boxes.append(__A )
# finally, normalize the bounding boxes
lowercase : List[str] =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(__A , __A , __A ) )
assert len(__A ) == len(__A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
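# Usage sketch (hypothetical names; this file mirrors the LayoutLMv2 image processor before
# the dataset's renaming pass):
#   processor = UpperCAmelCase_(apply_ocr=True)  # runs pytesseract OCR per image
#   encoding = processor(images=pil_image, return_tensors="np")
#   encoding["pixel_values"].shape  # (1, 3, 224, 224); "words" and "boxes" are added when OCR runs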
| 8 | 1 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
SCREAMING_SNAKE_CASE = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = DPRContextEncoderTokenizer
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = DPRQuestionEncoderTokenizer
SCREAMING_SNAKE_CASE = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
SCREAMING_SNAKE_CASE = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
SCREAMING_SNAKE_CASE = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__A )
class UpperCAmelCase_ :
"""simple docstring"""
def __call__( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Union[bool, str] = False , UpperCAmelCase : Union[bool, str] = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : Optional[Any] , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , )
elif titles is None or texts is None:
lowercase : Optional[int] =titles if texts is None else texts
return super().__call__(
UpperCAmelCase , UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , )
lowercase : int =titles if not isinstance(UpperCAmelCase , UpperCAmelCase ) else [titles]
lowercase : List[str] =texts if not isinstance(UpperCAmelCase , UpperCAmelCase ) else [texts]
lowercase : Optional[int] =len(UpperCAmelCase )
lowercase : Dict =questions if not isinstance(UpperCAmelCase , UpperCAmelCase ) else [questions] * n_passages
assert len(UpperCAmelCase ) == len(
UpperCAmelCase ), f'There should be as many titles as texts but got {len(UpperCAmelCase )} titles and {len(UpperCAmelCase )} texts.'
lowercase : Any =super().__call__(UpperCAmelCase , UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase )['''input_ids''']
lowercase : Union[str, Any] =super().__call__(UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase )['''input_ids''']
lowercase : Optional[Any] ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase , UpperCAmelCase )
]
}
if return_attention_mask is not False:
lowercase : Any =[]
for input_ids in encoded_inputs['''input_ids''']:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase : Optional[int] =attention_mask
return self.pad(UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors=UpperCAmelCase )
def A__ ( self : Dict , UpperCAmelCase : BatchEncoding , UpperCAmelCase : DPRReaderOutput , UpperCAmelCase : int = 16 , UpperCAmelCase : int = 64 , UpperCAmelCase : int = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
lowercase : str =reader_input['''input_ids''']
lowercase , lowercase , lowercase : str =reader_output[:3]
lowercase : Optional[Any] =len(UpperCAmelCase )
lowercase : List[str] =sorted(range(UpperCAmelCase ) , reverse=UpperCAmelCase , key=relevance_logits.__getitem__ )
lowercase : List[DPRReaderOutput] =[]
for doc_id in sorted_docs:
lowercase : Optional[int] =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase : Any =sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase : Tuple =sequence_ids.index(self.pad_token_id )
else:
lowercase : Union[str, Any] =len(UpperCAmelCase )
lowercase : str =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase , top_spans=UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase , start_index=UpperCAmelCase , end_index=UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def A__ ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : List[int] , UpperCAmelCase : int , UpperCAmelCase : int , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
lowercase : Optional[Any] =[]
for start_index, start_score in enumerate(UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowercase : List[str] =sorted(UpperCAmelCase , key=lambda x : x[1] , reverse=UpperCAmelCase )
lowercase : Optional[Any] =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
lowercase : Optional[Any] =end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__A )
class UpperCAmelCase_ ( __A , __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ = DPRReaderTokenizer
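# Illustrative end-to-end sketch (added for clarity, not part of the original file).
# In the un-obfuscated transformers source the class above is `DPRReaderTokenizer`,
# paired with a `DPRReader` model; the checkpoint name below is an assumption.
#
#   from transformers import DPRReader, DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
#   model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
#   encoded_inputs = tokenizer(
#       questions='What is love?',
#       titles='Haddaway',
#       texts='Haddaway is a singer best known for the 1993 hit "What Is Love".',
#       return_tensors='pt',
#   )
#   outputs = model(**encoded_inputs)
#   best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)  # List[DPRSpanPrediction]
#   print(best_spans[0].text)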
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 8 | 1 |
'''simple docstring'''
def lowercase_ ( __A : int = 1_0_0_0 ) -> int:
"""simple docstring"""
lowercase : List[str] =-1
lowercase : Optional[int] =0
for a in range(1 , n // 3 ):
# Solve a**2 + b**2 = c**2 together with a + b + c = n by eliminating c,
# which gives b = (n**2 - 2*a*n) / (2*n - 2*a)
lowercase : Tuple =(n * n - 2 * a * n) // (2 * n - 2 * a)
lowercase : Any =n - a - b
if c * c == (a * a + b * b):
lowercase : int =a * b * c
if candidate >= product:
lowercase : Dict =candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
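# Quick sanity check (illustrative): the only Pythagorean triplet with perimeter 12
# is (3, 4, 5), so the product is 3 * 4 * 5 = 60.
print(f"""{solution(12) = }""")  # -> solution(12) = 60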
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
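# Usage note (illustrative): the `_LazyModule` above delays importing the torch/TF
# implementations until one of the exported names is actually accessed:
#
#   import transformers.models.rembert as rembert   # cheap: no heavy backend loaded yet
#   rembert.RemBertConfig()                         # attribute access triggers the real import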
| 8 | 1 |
'''simple docstring'''
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : list ) -> None:
'''simple docstring'''
lowercase : Union[str, Any] =set_counts
lowercase : Any =max(UpperCAmelCase )
lowercase : Optional[Any] =len(UpperCAmelCase )
lowercase : Tuple =[1] * num_sets
lowercase : List[Any] =list(range(UpperCAmelCase ) )
def A__ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int ) -> bool:
'''simple docstring'''
lowercase : int =self.get_parent(UpperCAmelCase )
lowercase : int =self.get_parent(UpperCAmelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase : str =0
lowercase : Dict =dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase : Optional[Any] =self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase : List[str] =0
lowercase : Tuple =src_parent
lowercase : int =self.set_counts[src_parent]
lowercase : Union[str, Any] =max(self.max_set , UpperCAmelCase )
return True
def A__ ( self : int , UpperCAmelCase : int ) -> int:
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase : Tuple =self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
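# Minimal usage sketch (illustrative; in the un-obfuscated original this is a
# disjoint-set union that tracks set sizes, and the two methods above are `merge`
# and `get_parent`):
#
#   ds = DisjointSetUnion([1, 1, 1])  # three singleton sets of size 1
#   ds.merge(0, 1)                    # union-by-rank joins the first two sets
#   ds.merge(1, 2)                    # the third element joins the same set
#   ds.max_set                        # -> 3, the size of the largest set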
| 8 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def lowercase_ ( __A : Union[str, Version] , __A : str , __A : str ) -> Union[str, Any]:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
lowercase : Any =STR_OPERATION_TO_FUNC[operation]
if isinstance(__A , __A ):
lowercase : List[Any] =parse(importlib.metadata.version(__A ) )
return operation(__A , parse(__A ) )
def lowercase_ ( __A : str , __A : str ) -> Tuple:
"""simple docstring"""
return compare_versions(__A , __A , __A )
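# Illustrative usage of the two helpers above (un-obfuscated names: the first is
# `compare_versions(library_or_version, operation, requirement_version)`, the
# second is `is_torch_version(operation, version)`):
#
#   compare_versions(parse('2.1.0'), '>=', '2.0.0')  # -> True
#   is_torch_version('>=', '1.12')                   # True iff the installed torch is >= 1.12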
| 8 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''table-transformer'''
UpperCamelCase_ = ['''past_key_values''']
UpperCamelCase_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : int , UpperCAmelCase : Dict=True , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : Dict=100 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : int=2048 , UpperCAmelCase : List[Any]=8 , UpperCAmelCase : Any=6 , UpperCAmelCase : List[Any]=2048 , UpperCAmelCase : Union[str, Any]=8 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Optional[Any]="relu" , UpperCAmelCase : List[str]=256 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.0 , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : Tuple=0.0_2 , UpperCAmelCase : str=1.0 , UpperCAmelCase : int=False , UpperCAmelCase : List[str]="sine" , UpperCAmelCase : Tuple="resnet50" , UpperCAmelCase : int=True , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Dict=1 , UpperCAmelCase : Tuple=5 , UpperCAmelCase : Dict=2 , UpperCAmelCase : int=1 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : int=5 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=0.1 , **UpperCAmelCase : List[str] , ) -> str:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : List[Any] =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Any =backbone_config.get('''model_type''' )
lowercase : Union[str, Any] =CONFIG_MAPPING[backbone_model_type]
lowercase : Dict =config_class.from_dict(UpperCAmelCase )
# set timm attributes to None
lowercase , lowercase , lowercase : Union[str, Any] =None, None, None
lowercase : Optional[int] =use_timm_backbone
lowercase : Tuple =backbone_config
lowercase : int =num_channels
lowercase : int =num_queries
lowercase : List[str] =d_model
lowercase : int =encoder_ffn_dim
lowercase : Tuple =encoder_layers
lowercase : Optional[Any] =encoder_attention_heads
lowercase : Dict =decoder_ffn_dim
lowercase : Optional[Any] =decoder_layers
lowercase : List[Any] =decoder_attention_heads
lowercase : Optional[int] =dropout
lowercase : List[Any] =attention_dropout
lowercase : str =activation_dropout
lowercase : List[Any] =activation_function
lowercase : str =init_std
lowercase : int =init_xavier_std
lowercase : str =encoder_layerdrop
lowercase : Any =decoder_layerdrop
lowercase : str =encoder_layers
lowercase : Any =auxiliary_loss
lowercase : Any =position_embedding_type
lowercase : str =backbone
lowercase : List[Any] =use_pretrained_backbone
lowercase : str =dilation
# Hungarian matcher
lowercase : Union[str, Any] =class_cost
lowercase : Tuple =bbox_cost
lowercase : Optional[Any] =giou_cost
# Loss coefficients
lowercase : str =mask_loss_coefficient
lowercase : List[Any] =dice_loss_coefficient
lowercase : Any =bbox_loss_coefficient
lowercase : Any =giou_loss_coefficient
lowercase : str =eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : Tuple ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def A__ ( self : List[str] ) -> int:
'''simple docstring'''
return self.d_model
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = version.parse('''1.11''' )
@property
def A__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def A__ ( self : str ) -> float:
'''simple docstring'''
return 1e-5
@property
def A__ ( self : List[str] ) -> int:
'''simple docstring'''
return 12
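# Minimal usage sketch (illustrative): instantiating the default configuration and
# reading the ONNX export metadata defined above.
#
#   config = TableTransformerConfig()            # ResNet-50 backbone, 100 queries
#   onnx_config = TableTransformerOnnxConfig(config)
#   onnx_config.inputs                  # pixel_values / pixel_mask dynamic axes
#   onnx_config.atol_for_validation     # 1e-5
#   onnx_config.default_onnx_opset      # 12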
| 8 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( __A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
lowercase : List[Any] =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# Each div carrying this attribute corresponds to a single organic job listing
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
lowercase : List[str] =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
lowercase : Union[str, Any] =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def A__ ( self : Tuple ) -> str:
'''simple docstring'''
lowercase : Optional[int] =SMALL_MODEL_IDENTIFIER
lowercase : int ='''pt'''
lowercase : Tuple ='''tf'''
def A__ ( self : Any , UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
lowercase : List[Any] =AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Tuple =TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCAmelCase )
model_tf.save_pretrained(UpperCAmelCase )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] ='''mock_framework'''
# Framework provided - return whatever the user provides
lowercase : Optional[int] =FeaturesManager.determine_framework(self.test_model , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCAmelCase )
lowercase : Union[str, Any] =FeaturesManager.determine_framework(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCAmelCase )
lowercase : Optional[Any] =FeaturesManager.determine_framework(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCAmelCase )
lowercase : List[str] =FeaturesManager.determine_framework(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCAmelCase )
lowercase : Optional[Any] =FeaturesManager.determine_framework(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCAmelCase ):
lowercase : Union[str, Any] =FeaturesManager.determine_framework(UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =MagicMock(return_value=UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , UpperCAmelCase ):
lowercase : List[Any] =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase : Optional[int] =MagicMock(return_value=UpperCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , UpperCAmelCase ):
lowercase : Tuple =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
lowercase : List[str] =MagicMock(return_value=UpperCAmelCase )
lowercase : List[str] =MagicMock(return_value=UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , UpperCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , UpperCAmelCase ):
lowercase : Tuple =FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase , self.framework_pt )
# Both not in environment -> raise error
lowercase : str =MagicMock(return_value=UpperCAmelCase )
lowercase : str =MagicMock(return_value=UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , UpperCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , UpperCAmelCase ):
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =FeaturesManager.determine_framework(self.test_model )
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase_ = Features({'''text''': Value('''string''' )} )
UpperCamelCase_ = Features({'''summary''': Value('''string''' )} )
UpperCamelCase_ = "text"
UpperCamelCase_ = "summary"
@property
def A__ ( self : str ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
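# Illustrative usage: the template maps arbitrary dataset column names onto the
# canonical `text`/`summary` schema (the class above is `Summarization` in the
# un-obfuscated datasets source):
#
#   task = Summarization(text_column='article', summary_column='highlights')
#   task.column_mapping  # -> {'article': 'text', 'highlights': 'summary'}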
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''input_features''', '''attention_mask''']
def __init__( self : Dict , UpperCAmelCase : List[str]=80 , UpperCAmelCase : Optional[int]=1_6000 , UpperCAmelCase : Tuple=80 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : str=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : str=True , **UpperCAmelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase )
lowercase : Dict =num_mel_bins
lowercase : str =do_ceptral_normalize
lowercase : Union[str, Any] =normalize_means
lowercase : Optional[Any] =normalize_vars
lowercase : List[str] =True
def A__ ( self : List[str] , UpperCAmelCase : np.ndarray , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowercase : List[str] =torch.from_numpy(UpperCAmelCase ).unsqueeze(0 )
lowercase : Dict =ta_kaldi.fbank(UpperCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def A__ ( UpperCAmelCase : np.ndarray , UpperCAmelCase : int , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : float = 0.0 , ) -> np.ndarray:
'''simple docstring'''
if normalize_means:
lowercase : str =x[:input_length].mean(axis=0 )
lowercase : Union[str, Any] =np.subtract(UpperCAmelCase , UpperCAmelCase )
if normalize_vars:
lowercase : List[str] =x[:input_length].std(axis=0 )
lowercase : Optional[int] =np.divide(UpperCAmelCase , UpperCAmelCase )
if input_length < x.shape[0]:
lowercase : Any =padding_value
# make sure array is in float32
lowercase : Any =x.astype(np.floataa )
return x
def A__ ( self : Optional[Any] , UpperCAmelCase : List[np.ndarray] , UpperCAmelCase : Optional[np.ndarray] = None ) -> List[np.ndarray]:
'''simple docstring'''
lowercase : Union[str, Any] =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase , UpperCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCAmelCase , UpperCAmelCase )
]
def __call__( self : Optional[Any] , UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : List[Any] , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Optional[Any] =isinstance(UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowercase : Optional[int] =is_batched_numpy or (
isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] =[np.asarray(UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ):
lowercase : int =np.asarray(UpperCAmelCase , dtype=np.floataa )
elif isinstance(UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Tuple =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : Any =[raw_speech]
# extract fbank features
lowercase : Any =[self._extract_fbank_features(UpperCAmelCase ) for waveform in raw_speech]
# convert into correct format for padding
lowercase : Optional[Any] =BatchFeature({'''input_features''': features} )
lowercase : List[str] =self.pad(
UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , )
# make sure list is in array format
lowercase : Any =padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , UpperCAmelCase ):
lowercase : Optional[Any] =[np.asarray(UpperCAmelCase , dtype=np.floataa ) for feature in input_features]
lowercase : Union[str, Any] =padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
lowercase : int =[np.asarray(UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowercase : List[str] =(
np.array(UpperCAmelCase , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase , max_length=UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase : int =self.normalize(
padded_inputs['''input_features'''] , attention_mask=UpperCAmelCase )
if return_tensors is not None:
lowercase : Dict =padded_inputs.convert_to_tensors(UpperCAmelCase )
return padded_inputs
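# Minimal usage sketch (illustrative; the class above is `Speech2TextFeatureExtractor`
# in the un-obfuscated source, and the checkpoint name is an assumption):
#
#   import numpy as np
#
#   extractor = Speech2TextFeatureExtractor.from_pretrained('facebook/s2t-small-librispeech-asr')
#   waveform = np.zeros(16_000, dtype=np.float32)    # one second of silence at 16 kHz
#   inputs = extractor(waveform, sampling_rate=16_000, return_tensors='pt')
#   inputs['input_features'].shape   # (1, num_frames, 80) utterance-normalized log-mel filterbanks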
| 8 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
SCREAMING_SNAKE_CASE = {'allegro/herbert-base-cased': 514}
SCREAMING_SNAKE_CASE = {}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = HerbertTokenizer
def __init__( self : Dict , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : str="<pad>" , UpperCAmelCase : Optional[Any]="<mask>" , UpperCAmelCase : List[str]="</s>" , **UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sep_token=UpperCAmelCase , **UpperCAmelCase , )
def A__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : List[Any] =[self.cls_token_id]
lowercase : Any =[self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[Any] =[self.sep_token_id]
lowercase : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase : List[Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
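# Illustrative sketch of the special-token layout implemented above
# (single sequence: <s> A </s>; pair: <s> A </s> B </s>):
#
#   tok = HerbertTokenizerFast.from_pretrained('allegro/herbert-base-cased')
#   ids = tok('Kraków jest piękny')['input_ids']
#   ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id   # -> True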
| 8 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 8 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : Tuple =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Optional[Any] ='''1'''
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : str =self.get_env()
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer
        '''

        run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        '''

        mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        '''

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '''
from transformers import pipeline
        '''

        run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        '''

        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        '''

        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '''
from transformers import AutoModel
        '''

        run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        '''

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
| 8 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase_ ( __A , __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = StableUnCLIPImgaImgPipeline
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase_ = frozenset([] )
def A__ ( self : Any ) -> List[Any]:
'''simple docstring'''
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
lowercase : List[str] =CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase : str =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase : int =StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase : Union[str, Any] =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowercase : str =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowercase : Tuple =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase : int =UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase : int =DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase : str =AutoencoderKL()
lowercase : int ={
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def A__ ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : int=True ) -> Optional[Any]:
'''simple docstring'''
if str(UpperCAmelCase ).startswith('''mps''' ):
lowercase : List[str] =torch.manual_seed(UpperCAmelCase )
else:
lowercase : Any =torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if pil_image:
lowercase : str =input_image * 0.5 + 0.5
lowercase : Tuple =input_image.clamp(0 , 1 )
lowercase : Tuple =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase : Dict =DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : Union[str, Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[int] =self.get_dummy_components()
lowercase : List[Any] =StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase : str =sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase )
inputs.update({'''image_embeds''': None} )
lowercase : int =sd_pipe(**UpperCAmelCase ).images
lowercase : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : str =np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] =torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : int =torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Any =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowercase : Dict =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
lowercase : Union[str, Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase : Dict =pipe(UpperCAmelCase , '''anime turtle''' , generator=UpperCAmelCase , output_type='''np''' )
lowercase : int =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowercase : Optional[int] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
lowercase : Tuple =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase : Any =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase : Optional[Any] =pipe(UpperCAmelCase , '''anime turtle''' , generator=UpperCAmelCase , output_type='''np''' )
lowercase : str =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
lowercase : List[Any] =pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase : List[str] =pipe(
UpperCAmelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
lowercase : Dict =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 8 |
'''simple docstring'''
__author__ = 'Alexander Joslin'

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression using one operand
    stack and one operator stack (Dijkstra's two-stack algorithm)."""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop one operator and two
            # operands, apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()
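

# A minimal self-contained sketch of the same two-stack idea using plain
# Python lists instead of the repository's Stack class. Everything below
# (names included) is illustrative and not part of the original module.
def _two_stack_eval_sketch(equation: str) -> int:
    ops = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operands = []          # operand stack
    operator_symbols = []  # operator stack
    for ch in equation:
        if ch.isdigit():
            operands.append(int(ch))
        elif ch in ops:
            operator_symbols.append(ch)
        elif ch == ')':
            opr = operator_symbols.pop()
            right = operands.pop()
            left = operands.pop()
            operands.append(ops[opr](left, right))
    return operands[-1]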
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 1 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n
    (Project Euler problem 8), skipping ahead whenever the window
    cannot improve."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
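

# Hedged cross-check (not in the original): a straightforward scan over every
# 13-digit window; useful as a sanity check for the windowed loop above.
def _brute_force_solution(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))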
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if phone matches the Indian mobile number pattern."""
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
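

# Illustrative calls (expected results inferred from the pattern above):
#   indian_phone_validator('+91-9876543210')  # True: optional +91- prefix, then 10 digits starting with 7/8/9
#   indian_phone_validator('123456')          # False: too short and wrong leading digit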
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 8 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # no preprocess/forward/postprocess parameters to sanitize
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # resize the depth map back to the original image size
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype('uint8')
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
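

# Illustrative usage (a sketch; the checkpoint name is an assumption, not part
# of this file):
#   depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
#   out = depth_estimator('path/to/image.png')
#   out['depth']            # PIL image visualizing relative depth
#   out['predicted_depth']  # raw torch tensor from the model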
| 8 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
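
# Background on the expectation above (note, simplified): rotary position
# embeddings mix sinusoidal position signals into queries and keys, roughly
#   q' = q * cos(pos * theta) + rotate_half(q) * sin(pos * theta)
# so attention scores depend only on relative offsets between positions.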
| 8 | 1 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 1_0, '''max_num_jobs''': 1}, [range(1_0 )]),
        ({'''num_shards''': 1_0, '''max_num_jobs''': 1_0}, [range(i , i + 1 ) for i in range(1_0 )]),
({'''num_shards''': 1, '''max_num_jobs''': 1_0}, [range(1 )]),
({'''num_shards''': 1_0, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 1_0 )]),
({'''num_shards''': 3, '''max_num_jobs''': 1_0}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 1_0, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
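

# Quick illustration of the helper under test (values taken from the
# parametrized cases above): _distribute_shards(num_shards=10, max_num_jobs=3)
# returns [range(0, 4), range(4, 7), range(7, 10)], i.e. contiguous,
# near-balanced shard ranges per job.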
| 8 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , **UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase , )
lowercase : Any =kwargs.pop('''feature_extractor''' )
lowercase : Dict =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowercase : Tuple =self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Optional[Any] =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase : List[str] =features['''words''']
lowercase : Optional[Any] =self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
# add pixel values
lowercase : List[str] =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
lowercase : Dict =images
return encoded_inputs
def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
'''simple docstring'''
lowercase : str =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
return images_with_overflow
def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
return self.image_processor_class
@property
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
return self.image_processor
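

# Illustrative usage (a sketch; the checkpoint name is an assumption, not part
# of this file):
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   encoding.keys()  # input_ids, bbox, attention_mask, image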
| 8 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
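
# Note on the lazy-module pattern above: at runtime the module object is
# replaced by a _LazyModule, so names such as M2M100Config are only imported on
# first attribute access; the TYPE_CHECKING branch exists purely so static
# analyzers can resolve the same names.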
| 8 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    max_prime = 1
    i = 2
    while i * i <= n:
        # divide out each prime factor completely before moving on
        while n % i == 0:
            max_prime = i
            n //= i
        i += 1
    if n > 1:
        # whatever remains is itself prime and the largest factor
        max_prime = n
    return int(max_prime)
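

# Example (known value): solution() == 6857 for the default 600851475143,
# since 600851475143 = 71 * 839 * 1471 * 6857.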
if __name__ == "__main__":
print(f"""{solution() = }""")
| 8 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
SCREAMING_SNAKE_CASE = 'pt'
elif is_tf_available():
SCREAMING_SNAKE_CASE = 'tf'
else:
SCREAMING_SNAKE_CASE = 'jax'
class UpperCAmelCase_ ( __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ByTaTokenizer
UpperCamelCase_ = False
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
lowercase : str =ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def A__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def A__ ( self : str , **UpperCAmelCase : Any ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=20 , UpperCAmelCase : Union[str, Any]=5 ) -> Tuple[str, list]:
'''simple docstring'''
lowercase : int =[]
for i in range(len(UpperCAmelCase ) ):
try:
lowercase : Any =tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase : Any =list(filter(lambda UpperCAmelCase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , UpperCAmelCase ) )
lowercase : Union[str, Any] =list(filter(lambda UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase ) , UpperCAmelCase ) )
if max_length is not None and len(UpperCAmelCase ) > max_length:
lowercase : Dict =toks[:max_length]
if min_length is not None and len(UpperCAmelCase ) < min_length and len(UpperCAmelCase ) > 0:
while len(UpperCAmelCase ) < min_length:
lowercase : Dict =toks + toks
# toks_str = [t[1] for t in toks]
lowercase : Union[str, Any] =[t[0] for t in toks]
# Ensure consistency
lowercase : Optional[Any] =tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
if " " not in output_txt and len(UpperCAmelCase ) > 1:
lowercase : List[str] =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase )
)
if with_prefix_space:
lowercase : Any =''' ''' + output_txt
lowercase : Optional[int] =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
return output_txt, output_ids
def A__ ( self : Optional[int] ) -> str:
'''simple docstring'''
lowercase : Any =self.ta_base_tokenizer
lowercase : str =tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
lowercase : int =tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def A__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[int] =self.ta_base_tokenizer
lowercase : List[str] ='''Unicode €.'''
lowercase : Optional[Any] =tokenizer(UpperCAmelCase )
lowercase : List[Any] =[88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , UpperCAmelCase )
# decoding
lowercase : List[str] =tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , '''Unicode €.</s>''' )
lowercase : Dict =tokenizer('''e è é ê ë''' )
lowercase : List[str] =[104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , UpperCAmelCase )
# decoding
lowercase : str =tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
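
    # Why the ids above look the way they do (note): ByT5 tokenizes raw UTF-8
    # bytes with a +3 offset reserved for special tokens (pad=0, eos=1, unk=2),
    # so 'U' (byte 0x55 = 85) encodes to 88 and the Euro sign's three UTF-8
    # bytes 0xE2 0x82 0xAC encode to 229, 133, 175.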
def A__ ( self : Optional[int] ) -> str:
'''simple docstring'''
lowercase : Tuple =self.ta_base_tokenizer
lowercase : Optional[Any] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowercase : int =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase : Dict =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase : List[Any] =list(batch.input_ids.numpy()[0] )
else:
lowercase : int =list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def A__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] =self.ta_base_tokenizer
lowercase : Any =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase : Dict =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , UpperCAmelCase )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertNotIn('''decoder_input_ids''' , UpperCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : int =self.ta_base_tokenizer
lowercase : Dict =[
'''Summary of the text.''',
'''Another summary.''',
]
lowercase : List[Any] =tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] =self.ta_base_tokenizer
lowercase : Union[str, Any] =['''A long paragraph for summarization. </s>''']
lowercase : Dict =['''Summary of the text. </s>''']
# fmt: off
lowercase : Tuple =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase : Optional[int] =[86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase : Tuple =tokenizer(UpperCAmelCase , text_target=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , batch['''input_ids'''][0] )
self.assertEqual(UpperCAmelCase , batch['''labels'''][0] )
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowercase : Optional[int] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase : Any =tempfile.mkdtemp()
lowercase : Dict =''' He is very happy, UNwant\u00E9d,running'''
lowercase : Optional[int] =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowercase : str =tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowercase : Dict =after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
lowercase : Tuple =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase : Optional[int] =tempfile.mkdtemp()
lowercase : Dict =''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowercase : Optional[Any] =tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowercase : str =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowercase : Tuple =tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowercase : Optional[int] =after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowercase : List[Any] =tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
lowercase : Optional[int] =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowercase : List[Any] =json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowercase : int =json.load(UpperCAmelCase )
lowercase : List[str] =[f'<extra_id_{i}>' for i in range(125 )]
lowercase : Dict =added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowercase : List[Any] =added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase : Optional[int] =tokenizer_class.from_pretrained(
UpperCAmelCase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase : List[Any] =added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=UpperCAmelCase )]
lowercase : int =tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def A__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
lowercase : Optional[Any] =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
lowercase : Union[str, Any] =tokenizer_class.from_pretrained(UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def A__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def A__ ( self : Any ) -> List[str]:
'''simple docstring'''
pass
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
def A__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowercase : Tuple =self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowercase : Dict =['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
lowercase : Optional[int] =tokenizer.convert_tokens_to_string(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowercase : List[str] =[
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowercase : Dict =0
lowercase : Any =tokenizer.convert_ids_to_tokens(
UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
for attr in attributes_list:
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [] )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 8 |
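The tests above pin down how `additional_special_tokens` interacts with `from_pretrained`: values saved in `special_tokens_map.json` / `tokenizer_config.json` are picked up automatically, and a kwarg passed at load time overrides them. A minimal standalone sketch of that override, assuming Hub access and that the `google/byt5-small` checkpoint is available:

```python
from transformers import AddedToken, AutoTokenizer

# The kwarg wins over whatever the saved tokenizer files declare.
tokenizer = AutoTokenizer.from_pretrained(
    "google/byt5-small",
    additional_special_tokens=[AddedToken("a_new_additional_special_token", lstrip=True)],
)
assert "a_new_additional_special_token" in tokenizer.additional_special_tokens
```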
'''simple docstring'''
from __future__ import annotations
import math
def ucal( u : float , p : int ) -> float:
    """simple docstring"""
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """simple docstring"""
    n = int(input('''enter the numbers of values: ''' ) )
    y : list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
| 8 | 1 |
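The snippet above is interactive (it reads nodes from stdin). For a quick self-check, here is a non-interactive variant of the same forward-difference scheme on fixed data; `newton_forward` is a helper name introduced here, not part of the original file:

```python
import math

def newton_forward(x: list[float], y0: list[float], value: float) -> float:
    """Newton forward interpolation over equally spaced nodes x with samples y0."""
    n = len(x)
    table = [[0.0] * n for _ in range(n)]  # table[j][i]: i-th forward difference at node j
    for j in range(n):
        table[j][0] = y0[j]
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    result, u_term = table[0][0], 1.0
    for i in range(1, n):
        u_term *= u - (i - 1)
        result += u_term * table[0][i] / math.factorial(i)
    return result

# f(x) = x^2 sampled at 0..3; interpolating at 1.5 gives exactly 2.25
print(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5))
```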
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase_ ( __A , __A ):
"""simple docstring"""
UpperCamelCase_ = '''swin'''
UpperCamelCase_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[int]=224 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Any=3 , UpperCAmelCase : Dict=96 , UpperCAmelCase : Union[str, Any]=[2, 2, 6, 2] , UpperCAmelCase : Optional[int]=[3, 6, 12, 24] , UpperCAmelCase : List[Any]=7 , UpperCAmelCase : Union[str, Any]=4.0 , UpperCAmelCase : int=True , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : str=False , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : Optional[int]=1e-5 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Dict , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Optional[int] =image_size
lowercase : str =patch_size
lowercase : Dict =num_channels
lowercase : Optional[int] =embed_dim
lowercase : int =depths
lowercase : List[str] =len(UpperCAmelCase )
lowercase : List[str] =num_heads
lowercase : Union[str, Any] =window_size
lowercase : Union[str, Any] =mlp_ratio
lowercase : List[Any] =qkv_bias
lowercase : Optional[int] =hidden_dropout_prob
lowercase : List[str] =attention_probs_dropout_prob
lowercase : Optional[Any] =drop_path_rate
lowercase : Optional[int] =hidden_act
lowercase : Tuple =use_absolute_embeddings
lowercase : Optional[int] =layer_norm_eps
lowercase : Optional[int] =initializer_range
lowercase : List[Any] =encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase : str =int(embed_dim * 2 ** (len(UpperCAmelCase ) - 1) )
lowercase : List[Any] =['''stem'''] + [f'stage{idx}' for idx in range(1 , len(UpperCAmelCase ) + 1 )]
lowercase , lowercase : Union[str, Any] =get_aligned_output_features_output_indices(
out_features=UpperCAmelCase , out_indices=UpperCAmelCase , stage_names=self.stage_names )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = version.parse('''1.11''' )
@property
def A__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A__ ( self : Optional[Any] ) -> float:
'''simple docstring'''
return 1e-4
| 8 |
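A hedged usage sketch of the config class above: build the tiny Swin variant and a model from it (assumes a transformers version that ships Swin). Note how `hidden_size` is derived from `embed_dim` and the number of stages, exactly as in the `__init__` above:

```python
from transformers import SwinConfig, SwinModel

config = SwinConfig(
    image_size=224, patch_size=4, embed_dim=96,
    depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
)
model = SwinModel(config)
print(config.hidden_size)  # embed_dim * 2**(len(depths) - 1) = 768
```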
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =0
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 1 |
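The registration tests above follow a common pattern: map a custom config class onto a custom image processor so the auto classes can resolve it. A minimal sketch, assuming a transformers version where `AutoImageProcessor.register` takes the config class and processor class positionally; `MyConfig` and `MyImageProcessor` are names invented here for illustration:

```python
from transformers import AutoConfig, AutoImageProcessor
from transformers.configuration_utils import PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # key AutoConfig uses to find this class

class MyImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)
# From here on, AutoImageProcessor.from_pretrained(...) can resolve MyImageProcessor.
```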
'''simple docstring'''
class Node:
    """simple docstring"""
    def __init__( self , val ) -> None:
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ) -> None:
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ) -> None:
    """simple docstring"""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 8 |
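One design note on the BST above: equal keys fall through to the final `else` and merely overwrite a node, so duplicates are silently collapsed (and a root value of `0` is falsy, which this implementation does not guard against). For example:

```python
print(tree_sort([3, 1, 3, 2]))  # [1, 2, 3] — the second 3 is dropped
```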
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE = 8.988E9 # units = N * m^2 * C^-2
COULOMBS_CONSTANT = SCREAMING_SNAKE_CASE
def couloumbs_law( force : float , charge1 : float , charge2 : float , distance : float ) -> dict[str, float]:
    """simple docstring"""
    charge_product = abs(charge1 * charge2 )
    if (force, charge1, charge2, distance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if distance < 0:
        raise ValueError('''Distance cannot be negative''' )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 1 |
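A worked example against the restored function above: two 1 C charges 1 m apart should feel exactly Coulomb's constant in newtons.

```python
# F = k * |q1 * q2| / d^2 = 8.988e9 * 1 / 1
print(couloumbs_law(force=0, charge1=1, charge2=1, distance=1))
# {'force': 8988000000.0}
```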
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 |
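The boilerplate above defers heavy imports until a symbol is actually touched. A simplified sketch of that pattern follows — not the real `_LazyModule`, whose behavior is richer, and it assumes the module lives inside a package so relative imports resolve:

```python
import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Resolve exported symbols from their submodules on first attribute access."""
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr: str):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + module_name, self.__name__)
        return getattr(submodule, attr)
```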
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ) -> Dict[int, str]:
    """simple docstring"""
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word : str ) -> set:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Optional[int] =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 8 | 1 |
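Stripped of caching and byte-encoding, the `bpe` method above is a greedy merge loop: repeatedly fuse the adjacent pair with the lowest learned rank. A self-contained sketch with toy ranks invented for illustration:

```python
def bpe_merge(token: str, bpe_ranks: dict[tuple[str, str], int]) -> list[str]:
    """Greedily merge the lowest-ranked adjacent pair until none has a rank."""
    word = list(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if best not in bpe_ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word

# ranks from a toy corpus: merge "l"+"o" first, then "lo"+"w"
print(bpe_merge("lower", {("l", "o"): 0, ("lo", "w"): 1}))  # ['low', 'e', 'r']
```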
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = 42
class UpperCAmelCase_ ( __A , __A ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 3 , UpperCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , UpperCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , UpperCAmelCase : Tuple[int] = (64,) , UpperCAmelCase : int = 1 , UpperCAmelCase : str = "silu" , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 256 , UpperCAmelCase : int = 32 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : float = 0.1_8_2_1_5 , UpperCAmelCase : str = "group" , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
lowercase : Union[str, Any] =Encoder(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , down_block_types=UpperCAmelCase , block_out_channels=UpperCAmelCase , layers_per_block=UpperCAmelCase , act_fn=UpperCAmelCase , norm_num_groups=UpperCAmelCase , double_z=UpperCAmelCase , )
lowercase : Union[str, Any] =vq_embed_dim if vq_embed_dim is not None else latent_channels
        lowercase : Any =nn.Conv2d(UpperCAmelCase , UpperCAmelCase , 1 )
lowercase : Optional[Any] =VectorQuantizer(UpperCAmelCase , UpperCAmelCase , beta=0.2_5 , remap=UpperCAmelCase , sane_index_shape=UpperCAmelCase )
        lowercase : List[Any] =nn.Conv2d(UpperCAmelCase , UpperCAmelCase , 1 )
# pass init params to Decoder
lowercase : Union[str, Any] =Decoder(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , up_block_types=UpperCAmelCase , block_out_channels=UpperCAmelCase , layers_per_block=UpperCAmelCase , act_fn=UpperCAmelCase , norm_num_groups=UpperCAmelCase , norm_type=UpperCAmelCase , )
@apply_forward_hook
def A__ ( self : Tuple , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
lowercase : Tuple =self.encoder(UpperCAmelCase )
lowercase : Any =self.quant_conv(UpperCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=UpperCAmelCase )
@apply_forward_hook
def A__ ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
if not force_not_quantize:
lowercase , lowercase , lowercase : Tuple =self.quantize(UpperCAmelCase )
else:
lowercase : Dict =h
lowercase : Tuple =self.post_quant_conv(UpperCAmelCase )
lowercase : List[Any] =self.decoder(UpperCAmelCase , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCAmelCase )
def A__ ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
lowercase : int =sample
lowercase : Dict =self.encode(UpperCAmelCase ).latents
lowercase : Optional[Any] =self.decode(UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCAmelCase )
| 8 |
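At its core, the `quantize` step above snaps each latent vector to its nearest codebook entry and uses a straight-through estimator so gradients still reach the encoder. A simplified sketch of that lookup (the diffusers `VectorQuantizer` additionally handles the commitment loss, remapping, and index shapes):

```python
import torch

def nearest_code(latents: torch.Tensor, codebook: torch.Tensor):
    # latents: (B, C, H, W); codebook: (K, C)
    b, c, h, w = latents.shape
    flat = latents.permute(0, 2, 3, 1).reshape(-1, c)    # (B*H*W, C)
    indices = torch.cdist(flat, codebook).argmin(dim=1)  # nearest entry per vector
    quantized = codebook[indices].reshape(b, h, w, c).permute(0, 3, 1, 2)
    # straight-through: forward uses the codes, backward acts like identity
    quantized = latents + (quantized - latents).detach()
    return quantized, indices

zq, idx = nearest_code(torch.randn(1, 3, 8, 8), torch.randn(256, 3))
print(zq.shape, idx.shape)  # torch.Size([1, 3, 8, 8]) torch.Size([64])
```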
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =list(poly_a or [0] )[:]
lowercase : Dict =list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
lowercase : int =len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
lowercase : List[str] =len(self.polyB )
# Add 0 to make lengths equal a power of 2
lowercase : Tuple =int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
lowercase : str =self.__multiply()
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
        lowercase : Tuple =[[x] for x in self.polyA] if UpperCAmelCase == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase ) <= 1:
return dft[0]
#
lowercase : List[Any] =self.c_max_length // 2
while next_ncol > 0:
lowercase : str =[[] for i in range(UpperCAmelCase )]
lowercase : List[str] =self.root**next_ncol
# First half of next step
lowercase : Union[str, Any] =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
lowercase : Any =1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
lowercase : Tuple =new_dft
lowercase : List[Any] =next_ncol // 2
return dft[0]
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.__dft('''A''' )
lowercase : Union[str, Any] =self.__dft('''B''' )
lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
lowercase : Tuple =2
while next_ncol <= self.c_max_length:
lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
lowercase : Tuple =self.root ** (next_ncol // 2)
lowercase : Optional[int] =1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
lowercase : List[Any] =new_inverse_c
next_ncol *= 2
# Unpack
lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) )
lowercase : List[str] ='''B = ''' + ''' + '''.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) )
lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
f'{coef}*x^{i}' for coef, i in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 1 |
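The class above implements the radix-2 FFT by hand; the same convolution-by-pointwise-product identity can be cross-checked in a few lines with numpy (a sketch, not part of the original file):

```python
import numpy as np

def poly_multiply(a: list[float], b: list[float]) -> list[float]:
    """Multiply coefficient lists: convolution == pointwise product in frequency."""
    n = 1 << (len(a) + len(b) - 2).bit_length()  # next power of two >= result length
    product = np.fft.ifft(np.fft.fft(a, n) * np.fft.fft(b, n)).real
    return [round(v, 8) for v in product[: len(a) + len(b) - 1]]

# (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2
print(poly_multiply([1, 2], [3, 4]))  # [3.0, 10.0, 8.0]
```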
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''naver-clova-ix/donut-base-finetuned-docvqa'''
UpperCamelCase_ = (
'''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
UpperCamelCase_ = '''document_qa'''
UpperCamelCase_ = AutoProcessor
UpperCamelCase_ = VisionEncoderDecoderModel
UpperCamelCase_ = ['''image''', '''text''']
UpperCamelCase_ = ['''text''']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : "Image" , UpperCAmelCase : str ) -> int:
'''simple docstring'''
lowercase : int ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
lowercase : Union[str, Any] =task_prompt.replace('''{user_input}''' , UpperCAmelCase )
lowercase : str =self.pre_processor.tokenizer(
UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors='''pt''' ).input_ids
lowercase : Optional[int] =self.pre_processor(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def A__ ( self : Tuple , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=UpperCAmelCase , ).sequences
def A__ ( self : str , UpperCAmelCase : Dict ) -> int:
'''simple docstring'''
lowercase : Dict =self.pre_processor.batch_decode(UpperCAmelCase )[0]
lowercase : Tuple =sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
lowercase : Optional[Any] =sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
lowercase : List[str] =re.sub(R'''<.*?>''' , '''''' , UpperCAmelCase , count=1 ).strip() # remove first task start token
        lowercase : Union[str, Any] =self.pre_processor.token2json(UpperCAmelCase )
return sequence["answer"]
| 8 |
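The same checkpoint the tool wraps is reachable through the high-level pipeline API, which handles the prompt template and decoding shown above. A hedged sketch — `invoice.png` is a placeholder path, and Pillow plus Hub access are assumed:

```python
from transformers import pipeline

docvqa = pipeline(
    "document-question-answering",
    model="naver-clova-ix/donut-base-finetuned-docvqa",
)
print(docvqa(image="invoice.png", question="What is the total amount?"))
```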
'''simple docstring'''
def logical_left_shift( number : int , shift_amount : int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number : int , shift_amount : int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number : int , shift_amount : int ) -> str:
    """simple docstring"""
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 1 |
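A few worked calls against the restored functions above, including the arithmetic shift's sign-bit replication:

```python
print(logical_left_shift(5, 2))       # 0b10100  (5 << 2 == 20)
print(logical_right_shift(20, 2))     # 0b101    (20 >> 2 == 5)
print(arithmetic_right_shift(-8, 2))  # 0b11110  (two's complement: -8 >> 2 == -2)
```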