"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ : List[Any] = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Union[PIL.Image.Image, np.ndarray]
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
prior=SCREAMING_SNAKE_CASE__ , image_encoder=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , renderer=SCREAMING_SNAKE_CASE__ , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
if latents is None:
SCREAMING_SNAKE_CASE__ : List[Any] = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
SCREAMING_SNAKE_CASE__ : Any = latents.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = latents * scheduler.init_noise_sigma
return latents
def __magic_name__ (self , SCREAMING_SNAKE_CASE__=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.device(F'''cuda:{gpu_id}''' )
SCREAMING_SNAKE_CASE__ : List[Any] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def __magic_name__ (self ) -> Any:
"""simple docstring"""
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Any = torch.cat(SCREAMING_SNAKE_CASE__ , axis=0 ) if image[0].ndim == 4 else torch.stack(SCREAMING_SNAKE_CASE__ , axis=0 )
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : int = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = image.to(dtype=self.image_encoder.dtype , device=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = self.image_encoder(SCREAMING_SNAKE_CASE__ )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE__ : Tuple = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
SCREAMING_SNAKE_CASE__ : Optional[int] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : str = torch.zeros_like(SCREAMING_SNAKE_CASE__ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 25 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 4.0 , SCREAMING_SNAKE_CASE__ = 64 , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , ) -> List[str]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Optional[int] = image.shape[0]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
SCREAMING_SNAKE_CASE__ : List[Any] = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(SCREAMING_SNAKE_CASE__ )}''' )
SCREAMING_SNAKE_CASE__ : Any = self._execution_device
SCREAMING_SNAKE_CASE__ : Tuple = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE__ : Optional[Any] = guidance_scale > 1.0
SCREAMING_SNAKE_CASE__ : Any = self._encode_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# prior
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = self.scheduler.timesteps
SCREAMING_SNAKE_CASE__ : List[Any] = self.prior.config.num_embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prior.config.embedding_dim
SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
SCREAMING_SNAKE_CASE__ : List[Any] = latents.reshape(latents.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = self.prior(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , proj_embedding=SCREAMING_SNAKE_CASE__ , ).predicted_image_embedding
# remove the variance
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = []
for i, latent in enumerate(SCREAMING_SNAKE_CASE__ ):
print()
SCREAMING_SNAKE_CASE__ : Tuple = self.renderer.decode(
latent[None, :] , SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
images.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.stack(SCREAMING_SNAKE_CASE__ )
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
SCREAMING_SNAKE_CASE__ : Any = images.cpu().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=SCREAMING_SNAKE_CASE__ )
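# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline above): the classifier-free
# guidance step inside the denoising loop reduces to simple tensor arithmetic.
# The shapes and values below are made up purely for demonstration.
import torch as _torch

_uncond = _torch.zeros(1, 4)  # prediction with a zeroed (null) image embedding
_cond = _torch.ones(1, 4)     # prediction with the real image embedding
_guidance_scale = 3.0

# Same update rule as in __call__ above: push the prediction away from the
# unconditional branch, scaled by guidance_scale.
_guided = _uncond + _guidance_scale * (_cond - _uncond)
assert _torch.allclose(_guided, _torch.full((1, 4), 3.0))
# ---------------------------------------------------------------------------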
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """depth_multiplier""" ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=0.25 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__="relu6" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=None , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Dict = num_channels
SCREAMING_SNAKE_CASE__ : str = image_size
SCREAMING_SNAKE_CASE__ : Any = depth_multiplier
SCREAMING_SNAKE_CASE__ : int = min_depth
SCREAMING_SNAKE_CASE__ : Any = tf_padding
SCREAMING_SNAKE_CASE__ : int = int(last_hidden_size * depth_multiplier )
SCREAMING_SNAKE_CASE__ : Any = output_stride
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : int = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : str = use_labels
SCREAMING_SNAKE_CASE__ : Any = is_training
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = scope
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileNetVaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = config_and_inputs
SCREAMING_SNAKE_CASE__ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
__UpperCamelCase : Dict = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : Tuple = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Tuple = False
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MobileNetVaModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileNetVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple = 26
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : str = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
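# Illustrative sketch (not part of the test suite): the shape assertion in
# create_and_check_model above follows from MobileNetV1's overall stride and
# depth multiplier. With the tester defaults assumed here, the final feature
# map is 1x1 with int(1024 * 0.25) = 256 channels.
def _expected_feature_shape(
    batch_size=13, image_size=32, output_stride=32, last_hidden_size=1024, depth_multiplier=0.25
):
    channels = int(last_hidden_size * depth_multiplier)
    spatial = image_size // output_stride
    return (batch_size, channels, spatial, spatial)


assert _expected_feature_shape() == (13, 256, 1, 1)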
"""Testing suite for the PyTorch DPT model."""

import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


# We will verify our results on a fixture image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
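# Illustrative sketch (not part of the test suite): upsampling a raw
# `predicted_depth` map back to the input resolution, following the usual
# pattern from the DPT documentation; treat this as a sketch rather than the
# canonical post-processing. The tensor here is random stand-in data.
import torch as _torch

_predicted_depth = _torch.rand(1, 384, 384)  # stand-in for outputs.predicted_depth
_prediction = _torch.nn.functional.interpolate(
    _predicted_depth.unsqueeze(1),  # add a channel dimension: (1, 1, 384, 384)
    size=(480, 640),                # target (H, W) of the original image
    mode="bicubic",
    align_corners=False,
).squeeze()
_depth_uint8 = (_prediction / _prediction.max() * 255).to(_torch.uint8)
assert _depth_uint8.shape == (480, 640)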
"""Tests for the Stable Diffusion InstructPix2Pix pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # Overwrite the default test_latents_input because pix2pix encodes the image differently
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
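# Illustrative sketch (not part of the test suite): InstructPix2Pix combines
# three noise predictions per step (text+image, image-only, unconditional)
# with two guidance scales, which is why the tests above pass both
# `guidance_scale` and `image_guidance_scale`. This mirrors the published
# InstructPix2Pix guidance formula with stand-in tensors, not the exact
# pipeline internals.
import torch as _torch

_eps_uncond = _torch.zeros(1, 4)
_eps_image = _torch.ones(1, 4) * 0.5  # conditioned on the input image only
_eps_full = _torch.ones(1, 4)         # conditioned on both text and image
_guidance_scale, _image_guidance_scale = 7.5, 1.5

_eps = (
    _eps_uncond
    + _guidance_scale * (_eps_full - _eps_image)
    + _image_guidance_scale * (_eps_image - _eps_uncond)
)
assert _eps.shape == (1, 4)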
"""simple docstring"""
from torch import nn
def snake_case_ ( A_ : int ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
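# Illustrative usage (not part of the module above); the tensor shapes are
# arbitrary and only for demonstration.
import torch as _torch

_act = get_activation("gelu")
_out = _act(_torch.zeros(2, 3))
assert _out.shape == (2, 3)

# Any name outside the supported set raises ValueError:
try:
    get_activation("tanh")
except ValueError as err:
    print(err)  # Unsupported activation function: tanh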
"""Tokenization classes for the BartPho-syllable model."""

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
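# Illustrative usage (left commented out because it downloads the pretrained
# SentencePiece model; the exact token output depends on that model, so it is
# not asserted here):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))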
"""
Project Euler problem 36: find the sum of all numbers below one million that
are palindromic in both base 10 and base 2.
"""

from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Return the sum of all double-base palindromes below `limit`."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
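# Quick sanity check (illustrative): 585 is 1001001001 in binary, which is
# palindromic in both bases, so it contributes to the total.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])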
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point: dispatch to the scheduler named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
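# A minimal usage sketch (added for illustration, not part of the original
# source): drives `get_scheduler` with a throwaway model and optimizer. Only
# standard `torch` APIs and the names defined above are assumed.
if __name__ == "__main__":
    import torch

    _model = torch.nn.Linear(4, 2)
    _optimizer = torch.optim.AdamW(_model.parameters(), lr=1e-3)
    _scheduler = get_scheduler("cosine", _optimizer, num_warmup_steps=100, num_training_steps=1000)
    for _ in range(1000):
        _optimizer.step()
        _scheduler.step()  # advance the lr schedule once per optimizer step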
| 687 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 273 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """
    Friedmann-equation estimate of the Hubble parameter at redshift z:
        H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda)
    with curvature Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda).
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 273 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_lowercase : Optional[int] = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
    title_sep (`str`, *optional*, defaults to `" / "`):
        Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
    doc_sep (`str`, *optional*, defaults to `" // "`):
        Separator inserted between the text of the retrieved document and the original input when calling
        [`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
    dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`)
        Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`)
        The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
        `"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate a RagConfig from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance, including the nested sub-configs, to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
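# A brief usage sketch (added for illustration, not part of the original
# source): builds a composite RagConfig from two sub-model configs. The
# BertConfig/BartConfig pairing is an assumption chosen only for the demo.
if __name__ == "__main__":
    from transformers import BartConfig, BertConfig

    rag_config = RagConfig.from_question_encoder_generator_configs(
        BertConfig(), BartConfig(), n_docs=5, index_name="exact"
    )
    print(rag_config.to_dict()["model_type"])  # -> "rag"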
| 50 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 | 1 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Return the maximum sum of non-adjacent elements.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 110 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 15_36,
'junnyu/roformer_chinese_base': 15_36,
'junnyu/roformer_chinese_char_small': 5_12,
'junnyu/roformer_chinese_char_base': 5_12,
'junnyu/roformer_small_discriminator': 1_28,
'junnyu/roformer_small_generator': 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # the custom Jieba pre-tokenizer cannot be serialized, swap it out first
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
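# A short usage sketch (added for illustration, not part of the original
# source): loads the fast tokenizer from one of the checkpoints mapped above
# and encodes a Chinese sentence. Assumes the optional `rjieba` dependency of
# the Jieba pre-tokenizer is installed.
if __name__ == "__main__":
    tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(tok("今天天气非常好。")["input_ids"])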
| 110 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    """Iterative (bottom-up) segment tree over an arbitrary associative function."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr  # internal 1-based tree
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every (i, j) range query against a brute-force `reduce`."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpaintingPipeline(DiffusionPipeline):
    """Text-guided inpainting: CLIPSeg predicts the mask, Stable Diffusion inpaints it."""

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg to obtain the mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
        )
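# A usage sketch (added for illustration, not part of the original source):
# assembles the pipeline from public CLIPSeg and Stable Diffusion inpainting
# checkpoints; the checkpoint names and the input image path are assumptions.
if __name__ == "__main__":
    from diffusers import StableDiffusionInpaintPipeline as SDInpaint

    seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    sd = SDInpaint.from_pretrained("runwayml/stable-diffusion-inpainting")
    pipe = TextInpaintingPipeline(
        segmentation_model=seg_model,
        segmentation_processor=seg_processor,
        vae=sd.vae,
        text_encoder=sd.text_encoder,
        tokenizer=sd.tokenizer,
        unet=sd.unet,
        scheduler=sd.scheduler,
        safety_checker=sd.safety_checker,
        feature_extractor=sd.feature_extractor,
    )
    init_image = PIL.Image.open("input.png").convert("RGB").resize((512, 512))
    result = pipe(prompt="a red sofa", image=init_image, text="the couch")
    result.images[0].save("output.png")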
| 81 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Projects CLIP image/text embeddings into the time embedding and the extra
    context tokens consumed by the unCLIP decoder."""

    @register_to_config
    def __init__(
        self, *,
        clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim: int,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
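# A minimal shape check (added for illustration, not part of the original
# source): exercises the module with random tensors; all dimensions here are
# arbitrary choices for the demo.
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4,
        clip_embeddings_dim=768,
        time_embed_dim=1536,
        cross_attention_dim=1280,
    )
    hidden, time_emb = proj(
        image_embeddings=torch.randn(2, 768),
        prompt_embeds=torch.randn(2, 768),
        text_encoder_hidden_states=torch.randn(2, 77, 768),
        do_classifier_free_guidance=False,
    )
    print(hidden.shape, time_emb.shape)  # torch.Size([2, 81, 1280]) torch.Size([2, 1536])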
| 242 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def __magic_name__ ( self , __snake_case , __snake_case = "en" , __snake_case = None , __snake_case = "ro" , **__snake_case , ) -> BatchEncoding:
'''simple docstring'''
__a =src_lang
__a =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ) -> Optional[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__a =src_lang
__a =self(__snake_case , add_special_tokens=__snake_case , **__snake_case )
__a =self.get_lang_id(__snake_case )
__a =tgt_lang_id
return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset special tokens to the source-language setting: prefix=[lang_code_id], suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset special tokens to the target-language setting: prefix=[lang_code_id], suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
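# A usage sketch (added for illustration, not part of the original source):
# tokenizes an English sentence for en->fr translation with the public 418M
# checkpoint referenced in the maps above.
if __name__ == "__main__":
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model_inputs = tokenizer("Hello, how are you?", return_tensors="pt")
    print(model_inputs["input_ids"])  # begins with the __en__ language token id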
| 242 | 1 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Classic finite-field Diffie-Hellman key exchange over an RFC 3526 MODP group."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]
    def is_valid_public_key(self, key: int) -> bool:
        # check whether the other party's public key is valid
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check whether the other party's public key is valid
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
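# A usage sketch (added for illustration, not part of the original source):
# two parties derive the same shared secret from each other's public keys.
if __name__ == "__main__":
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_secret = alice.generate_shared_key(bob.generate_public_key())
    bob_secret = bob.generate_shared_key(alice.generate_public_key())
    assert alice_secret == bob_secret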
| 708 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 644 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # NOTE: the three boolean flag names below are restored to the usual TF
    # tester conventions; the obfuscated source does not preserve them.
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 10 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, examples):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_76, 0.0_15, 0.0_09],
} ,)
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=_UpperCamelCase ,)
        self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} ,)
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_76, 0.0_15, 0.0_09],
} ,)
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=_UpperCamelCase ,)
        self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} ,) | 334 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
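# A minimal sketch of the deferred-import idea behind `_LazyModule` above (an
# illustration under stated assumptions, not the actual transformers class):
# heavy submodules are imported only when one of their exported attributes is
# first accessed, and the result is cached on the module object.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value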
| 330 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
'''simple docstring'''
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)
# no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 330 | 1 |
"""simple docstring"""
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
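# The three checks above assume a singly linked list node type that is not part
# of this snippet; a minimal hypothetical definition and smoke test follow.
class ListNode:
    def __init__(self, val=0, next_node=None):
        self.val = val
        self.next = next_node


def build_list(values):
    head = None
    for value in reversed(values):
        head = ListNode(value, head)
    return head


if __name__ == "__main__":
    assert is_palindrome(build_list([1, 2, 2, 1]))
    assert is_palindrome_stack(build_list([1, 2, 1]))
    assert not is_palindrome_dict(build_list([1, 2, 3]))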
| 46 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 178 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 207 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 207 | 1 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """In-place pigeonhole sort of a list of integers."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
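# Quick sanity check (illustrative usage): pigeonhole sort runs in O(n + range)
# time and pays off only when max(a) - min(a) is small relative to len(a).
if __name__ == "__main__":
    data = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(data)
    assert data == sorted([8, 3, 2, 7, 4, 6, 8])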
| 70 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Optional[Any] = "open-llama"
def __init__(self ,_lowerCamelCase=100000 ,_lowerCamelCase=4096 ,_lowerCamelCase=11008 ,_lowerCamelCase=32 ,_lowerCamelCase=32 ,_lowerCamelCase="silu" ,_lowerCamelCase=2048 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-6 ,_lowerCamelCase=True ,_lowerCamelCase=0 ,_lowerCamelCase=1 ,_lowerCamelCase=2 ,_lowerCamelCase=False ,_lowerCamelCase=True ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> Dict:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = rms_norm_eps
__lowercase = use_cache
__lowercase = kwargs.pop(
'''use_memorry_efficient_attention''' ,_lowerCamelCase )
__lowercase = hidden_dropout_prob
__lowercase = attention_dropout_prob
__lowercase = use_stable_embedding
__lowercase = shared_input_output_embedding
__lowercase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,tie_word_embeddings=_lowerCamelCase ,**_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"got {self.rope_scaling}" )
__lowercase = self.rope_scaling.get('''type''' ,_lowerCamelCase )
__lowercase = self.rope_scaling.get('''factor''' ,_lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_lowerCamelCase ,_lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 502 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
__lowerCAmelCase : Any = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowerCAmelCase : Union[str, Any] = BASE_URL + "/user"
# https://github.com/settings/tokens
__lowerCAmelCase : Optional[int] = os.environ.get('USER_TOKEN', '')
def a_ (_lowerCAmelCase : Dict )-> str:
snake_case: Dict = {
"""Authorization""": F"token {auth_token}",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(__A , headers=__A ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 715 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = torch.device('cpu')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
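    # Example invocation (hypothetical script filename and paths):
    #
    #   python convert_swiftformer_original_to_hf.py \
    #       --swiftformer_name swiftformer_xs \
    #       --pytorch_dump_folder_path ./converted_outputs/ \
    #       --original_ckpt ./swiftformer_xs.pth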
| 164 | 0 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
UpperCamelCase = """facebook/wmt19-en-de"""
UpperCamelCase = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
UpperCamelCase = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 104 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length, vocab_size, d_model, dropout_rate, num_layers, num_heads, d_kv, d_ff, feed_forward_proj, is_decoder=False):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
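# Editor's smoke test (illustrative only; every hyperparameter value below is
# an assumption, not taken from a released checkpoint):
if __name__ == "__main__":
    encoder = SpectrogramNotesEncoder(
        max_length=256, vocab_size=1536, d_model=64, dropout_rate=0.1, num_layers=1,
        num_heads=2, d_kv=32, d_ff=128, feed_forward_proj="gated-gelu",
    )
    tokens = torch.randint(0, 1536, (1, 256))
    mask = torch.ones(1, 256, dtype=torch.long)
    hidden_states, out_mask = encoder(tokens, mask)
    print(hidden_states.shape)  # torch.Size([1, 256, 64])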
| 104 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
# Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
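    # Example invocation (hypothetical paths; the script name is an assumption
    # based on how this converter is commonly distributed in diffusers):
    #
    #   python convert_diffusers_to_original_stable_diffusion.py \
    #       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half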
| 46 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 46 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?",\n        ...     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...     ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     normalized=True,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     ignore_punct=True,\n        ...     case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?",\n        ...     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...     ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     ignore_punct=True,\n        ...     case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''http://www.cs.umd.edu/~snover/tercom/''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ),
} ) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] ,reference_urls=[
'''https://github.com/jhclark/tercom''',
] ,)
    def _compute(self, predictions, references, normalized=False, ignore_punct=False, support_zh_ja_chars=False, case_sensitive=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 206 |
def jaro_winkler(str_a: str, str_b: str) -> float:
    """Computes the Jaro-Winkler similarity between two strings (0.0 to 1.0)."""

    def get_matched_characters(_str_a: str, _str_b: str) -> str:
        matched = []
        limit = min(len(_str_a), len(_str_b)) // 2
        for i, char in enumerate(_str_a):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_b)))
            if char in _str_b[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot be matched twice
                _str_b = f"{_str_b[0:_str_b.index(char)]} {_str_b[_str_b.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)

    # transpositions: matched characters that appear in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4], str_b[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
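    # Added sanity check (not in the original file): the classic pair
    # "martha"/"marhta" has 6 matches, 1 transposition and a 3-character
    # common prefix, giving jaro = (1 + 1 + 5/6) / 3 = 0.9444... and
    # jaro_winkler = 0.9611... with the standard 0.1 prefix scaling factor.
    assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611
    assert jaro_winkler("hello", "hello") == 1.0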
| 206 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
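# Added usage note (not part of the original script): the checks above only
# exercise multiple ranks when run under a distributed launcher. Assuming the
# file is saved as test_ops.py, two ways to run it on two local processes are:
#
#     from accelerate import notebook_launcher
#     notebook_launcher(main, args=(), num_processes=2)
#
# or, from a shell: accelerate launch --num_processes 2 test_ops.py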
| 574 |
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
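    # Added sanity check (not in the original file): a vertical blinker
    # oscillates into a horizontal one after a single generation.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]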
| 574 | 1 |
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Boruvka's algorithm requires distinct edge weights; this makes them distinct."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given sets of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm; returns a minimum spanning tree of the graph."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
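# Added usage sketch (not part of the original module): build a small weighted
# graph, make its weights distinct, and extract a minimum spanning tree.
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (1, 2, 3), (2, 3, 4)])
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    # The cycle edge 1-2 is dropped; 0-1, 0-2 and 2-3 remain.
    print(mst)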
| 116 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 116 | 1 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
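    # Added sanity check (not in the original file): for the first ten natural
    # numbers the difference is 55**2 - 385 = 2640, as in the problem statement.
    assert solution(10) == 2640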
| 715 |
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
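# Added smoke-test sketch (not part of the original file; the sizes below are
# arbitrary assumptions). With one cutoff, the layer splits the vocabulary
# into a head and one tail cluster but still returns log-probabilities over
# the full vocabulary. The module uses relative imports, so this snippet is
# meant to be run wherever TFAdaptiveSoftmaxMask is importable:
#
#     layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[500], div_val=1)
#     hidden = tf.random.normal((5, 2, 32))  # (seq_len, batch, d_proj)
#     target = tf.random.uniform((5, 2), maxval=1000, dtype=tf.int32)
#     log_probs = layer(hidden, target, return_mean=True)  # -> shape (5, 2, 1000)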
| 350 | 0 |
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


# NOTE: the original class name was obfuscated; "CustomLocalPipeline" follows
# diffusers' local custom-pipeline test fixture, which returns the same
# "This is a local test" marker string.
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
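# Added usage note (not part of the original file): a pipeline like this is
# normally loaded through diffusers' custom-pipeline mechanism; the model id
# and path below are illustrative assumptions:
#
#     pipeline = DiffusionPipeline.from_pretrained(
#         "google/ddpm-cifar10-32", custom_pipeline="./dir_containing_this_file"
#     )
#     output, message = pipeline(num_inference_steps=2, output_type="np")
#     assert message == "This is a local test"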
| 404 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 404 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily picks the largest denominations first to make the given change."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take as many of this denomination as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
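# Added usage example (not part of the original script):
#
#     >>> find_minimum_change([1, 2, 5, 10], "27")
#     [10, 10, 5, 2]
#
# Greedy choice takes the two largest 10s first, then a 5, then a 2.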
| 712 |
def sum_digits(num: int) -> int:
    """Returns the sum of the decimal digits of num."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Returns the sum of the digits in the numerator of the max_n-th
    convergent of the continued fraction for e."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 490 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 63 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 630 | 0 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Union[str, Any] = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['LayoutLMv2FeatureExtractor']
__UpperCamelCase : Union[str, Any] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
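# Hedged note (illustrative comment, not executed code): with the structure above, importing the
# package itself is cheap; _LazyModule only imports a submodule when one of its registered names
# is first accessed, e.g.
#   from transformers.models.layoutlmv2 import LayoutLMv2Config  # no torch import triggered
#   from transformers.models.layoutlmv2 import LayoutLMv2Model   # pulls in the modeling code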
| 458 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a TF 1.0 model checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, t5, gpt, gpt2, transfo_xl, xlnet,"
                " xlm, lxmert, rembert]"
            )
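

# --- Hedged usage sketch: driving the command programmatically instead of via the CLI.
# The paths below are placeholders, and running it requires TensorFlow to be installed.
def _demo_convert_bert():
    cmd = ConvertCommand(
        model_type="bert",
        tf_checkpoint="/path/to/bert_model.ckpt",
        pytorch_dump_output="/path/to/pytorch_model.bin",
        config="/path/to/bert_config.json",
        finetuning_task_name=None,
    )
    cmd.run()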
| 343 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq RoBERTa weights into our BERT-style structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
# self attention
        self_attn: BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
# self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
# intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
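# Hedged shell usage sketch (paths are placeholders):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --classification_head   # only when converting an MNLI-finetuned checkpoint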
| 343 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
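

# --- Hedged usage sketch: how the special-token helpers above format a pair of sequences.
# Assumes "roberta-base" can be downloaded; the token ids are illustrative only.
def _demo_special_tokens():
    tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    ids = tok.build_inputs_with_special_tokens([713, 16], [38, 437])
    # RoBERTa formats pairs as: <s> A </s></s> B </s>
    assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id
    return ids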
| 37 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match
    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")
    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
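

# --- Hedged sketch of what _no_encoding_on_file_open flags: a bare open(...) is a match,
# while an open(...) that sets an encoding (or uses a binary/write mode) is not.
def _demo_encoding_regex():
    regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert regexp.search(' open("data.txt")') is not None
    assert regexp.search(' open("data.txt", encoding="utf-8")') is None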
| 37 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
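    # Hedged shell usage sketch (paths are placeholders):
    #   python convert_bert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/bert_model.ckpt \
    #       --bert_config_file /path/to/bert_config.json \
    #       --pytorch_dump_path /path/to/pytorch_model.bin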
 | 75 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def load_image(_):
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
pass
| 599 | 0 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
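

# --- Hedged worked example for rename_key above: dots before a digit index become underscores,
# matching Flax's flattened parameter naming before the key is split into a tuple.
def _demo_rename_key():
    assert rename_key("encoder.layers.0.weight") == "encoder.layers_0.weight"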
| 146 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    r"""Pipeline for unconditional image generation with the score-based SDE (variance exploding) sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
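

# --- Hedged usage sketch: running the pipeline above end to end.
# "google/ncsnpp-church-256" is an assumed-compatible public checkpoint; sampling is slow on CPU.
def _demo_sde_ve():
    pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
    image = pipe(num_inference_steps=10).images[0]  # few steps, for illustration only
    return image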
| 146 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    r"""Constructs an image processor (resize to a scaled shortest edge, then center crop)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
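

# --- Hedged usage sketch: preprocessing one image with the processor above. The class name
# LevitImageProcessor is an inference from the 256/224 resize rule; adjust if it differs.
def _demo_preprocess():
    from PIL import Image  # pillow assumed available

    processor = LevitImageProcessor()  # defaults: scale shortest edge, then 224x224 center crop
    image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
    batch = processor(image, return_tensors="np")  # BaseImageProcessor.__call__ dispatches to preprocess
    assert batch["pixel_values"].shape == (1, 3, 224, 224)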
| 391 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
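

# Hedged rendering note: the direction constants above (UP/DOWN/RIGHT) are restored by
# inference and may differ from the original scene. With a Scene subclass that defines a
# construct method, the animation can be rendered from the shell, e.g.:
#   manim -pql this_file.py Stage1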
| 391 | 1 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
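

# Hedged worked example: at 60 degrees, cos(60°)² = 0.25, so a quarter of the light passes.
def _demo_malus_law():
    assert math.isclose(malus_law(100.0, 60.0), 25.0)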
if __name__ == "__main__":
import doctest
    doctest.testmod(name="malus_law")
 | 719 |
'''simple docstring'''
g = 9.80665  # standard acceleration due to gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume
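

# Hedged worked example: 1 m³ of water (1000 kg/m³) displaces about 9806.65 N under standard gravity.
def _demo_buoyant_force():
    assert abs(archimedes_principle(fluid_density=1000.0, volume=1.0) - 9806.65) < 1e-6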
if __name__ == "__main__":
import doctest
# run doctest
    doctest.testmod()
 | 697 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        self._config.onnx_export = True
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
@property
    def atol_for_validation(self) -> float:
        """What absolute tolerance value to use during model conversion validation."""
        return 1e-4
@property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
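

# --- Hedged usage sketch: pairing the config with its ONNX config to build dummy inputs.
# Assumes the "allenai/longformer-base-4096" tokenizer can be downloaded and torch is installed.
def _demo_longformer_onnx_inputs():
    from transformers import AutoTokenizer

    config = LongformerConfig(attention_window=128, num_hidden_layers=2)
    onnx_config = LongformerOnnxConfig(config)
    tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=1, seq_length=16, framework=TensorType.PYTORCH)
    return dummy["global_attention_mask"]  # every second position is 1, per the method above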
| 130 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        """Keyword arguments for a tiny VQModel used as the movq image decoder."""
        return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint,
            generator=generator, num_inference_steps=100, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
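# Note on the seeding pattern above: the prior and the decoder each get a *fresh*
# generator seeded with 0, so the reference image stays reproducible even if the
# number of prior steps changes. A hedged sketch of the same idiom:
#
#   gen = lambda: torch.Generator(device="cuda").manual_seed(0)
#   emb, zero_emb = pipe_prior(prompt, generator=gen()).to_tuple()
#   image = pipeline(image_embeds=emb, negative_image_embeds=zero_emb,
#                    hint=hint, generator=gen()).images[0]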
| 130 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
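# Minimal usage sketch. `ASTModel` is assumed to ship alongside this config in
# the same transformers release; the defaults mirror the signature above:
#
#   from transformers import ASTConfig, ASTModel
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   model = ASTModel(config)  # randomly initialised AST encoder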
| 718 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id)
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
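# Sketch of the rope_scaling knob toggled above; the dict form matches the test:
#
#   from transformers import GPTNeoXConfig
#   linear_cfg = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 10.0})
#   dynamic_cfg = GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 10.0})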
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
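# Hedged sketch of the incremental-decoding pattern verified by
# test_decoder_model_past_large_inputs above: prime the KV cache once, then
# feed only the new tokens together with past_key_values.
#
#   out = model(input_ids, use_cache=True)
#   nxt = out.logits[:, -1:].argmax(-1)
#   out2 = model(nxt, past_key_values=out.past_key_values)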
| 544 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = num_frames * self.num_patches_per_frame + 1
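        # Worked example with the defaults above (image_size=10, patch_size=2, num_frames=2):
        #   num_patches_per_frame = (10 // 2) ** 2 = 25
        #   seq_length = 2 * 25 + 1 = 51 tokens (the +1 is the CLS token)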
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type)
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
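        # Hedged follow-up (not part of the original test): the Kinetics-400
        # label for the clip can be read off the config's id2label mapping:
        #   predicted = outputs.logits.argmax(-1).item()
        #   print(model.config.id2label[predicted])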
| 227 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image-classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
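# Usage sketch (assumes a flax install; the checkpoint names are illustrative):
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
#   mlm = FlaxAutoModelForMaskedLM.from_pretrained("roberta-base")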
| 326 | 0 |
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """One reverse-and-add step: num plus its digit reversal."""
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10_000) -> int:
    """Project Euler 55: count Lychrel candidates below `limit`.

    A number is assumed Lychrel if 50 reverse-and-add iterations never
    produce a palindrome.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # while-else: no palindrome was found within 50 iterations
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
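# Worked example: 349 resolves in three reverse-and-add steps
# (349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337, a palindrome),
# so it is not counted; 196 never produces a palindrome within 50 iterations
# and is counted as a Lychrel candidate.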
| 710 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """Tests for DDIMParallelScheduler, mirroring the DDIMScheduler test suite."""

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
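# Minimal standalone sketch of the loop that `full_loop` above drives
# (`unet` stands in for any epsilon-predicting model; names are illustrative):
#
#   scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = unet(sample, t)
#       sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample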
| 576 | 0 |
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (deltas in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
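# Intended usage of the helpers above (log_measures is defined just below):
#
#   start = start_measure()
#   ...  # workload under test
#   measures = end_measure(start)
#   log_measures(measures, "fine-tuning step")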
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 228 |
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_9344,
"knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_7777_7778,
"mph": 0.6_2137_1192,
"knot": 0.5_3995_6803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert `speed` between km/h, m/s, mph and knot, using km/h as the pivot unit."""
    if unit_from not in speed_chart or unit_to not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
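# Example: convert_speed(100, "km/h", "mph") -> 62.137
# (100 * 1.0 km/h-per-km/h * 0.621371192 mph-per-km/h, rounded to 3 decimals)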
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 228 | 1 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]
    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1
    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
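    # Hypothetical follow-up (names illustrative, not part of this script): the
    # saved checkpoint seeds a 6-layer student before distillation training, e.g.
    #   student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)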
| 111 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
    def test_shap_e_img2img( self ):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        '''simple docstring'''
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img( self ):
        '''simple docstring'''
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy" )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
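# Hedged sketch (not part of the dataset row): a minimal stand-in for diffusers'
# `assert_mean_pixel_difference`, which the slow test above relies on; the default
# threshold of 10 uint8 levels is an assumption, not the library's guaranteed value.
def mean_pixel_difference_ok(image, expected_image, expected_max_diff=10):
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    return np.abs(image - expected_image).mean() < expected_max_diff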
| 111 | 1 |
'''simple docstring'''
# Imports
import numpy as np
class IndexCalculation :
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
            'ARVI2': self.arvaa,
            'CCCI': self.ccci,
            'CVI': self.cvi,
            'GLI': self.gli,
            'NDVI': self.ndvi,
            'BNDVI': self.bndvi,
            'redEdgeNDVI': self.red_edge_ndvi,
            'GNDVI': self.gndvi,
            'GBNDVI': self.gbndvi,
            'GRNDVI': self.grndvi,
            'RBNDVI': self.rbndvi,
            'PNDVI': self.pndvi,
            'ATSAVI': self.atsavi,
            'BWDRVI': self.bwdrvi,
            'CIgreen': self.ci_green,
            'CIrededge': self.ci_rededge,
            'CI': self.ci,
            'CTVI': self.ctvi,
            'GDVI': self.gdvi,
            'EVI': self.evi,
            'GEMI': self.gemi,
            'GOSAVI': self.gosavi,
            'GSAVI': self.gsavi,
            'Hue': self.hue,
            'IVI': self.ivi,
            'IPVI': self.ipvi,
            'I': self.i,
            'RVI': self.rvi,
            'MRVI': self.mrvi,
            'MSAVI': self.m_savi,
            'NormG': self.norm_g,
            'NormNIR': self.norm_nir,
            'NormR': self.norm_r,
            'NGRDI': self.ngrdi,
            'RI': self.ri,
            'S': self.s,
            'IF': self._if,
            'DVI': self.dvi,
            'TVI': self.tvi,
            'NDRE': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('Index not in the list!' )
            return False
    def arvaa( self ):
        '''simple docstring'''
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self ):
        '''simple docstring'''
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi( self ):
        '''simple docstring'''
        return self.nir * (self.red / (self.green**2))
    def gli( self ):
        '''simple docstring'''
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi( self ):
        '''simple docstring'''
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self ):
        '''simple docstring'''
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self ):
        '''simple docstring'''
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self ):
        '''simple docstring'''
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi( self , x=0.08 , a=1.22 , b=0.03 ):
        '''simple docstring'''
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        '''simple docstring'''
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self ):
        '''simple docstring'''
        return (self.nir / self.green) - 1
    def ci_rededge( self ):
        '''simple docstring'''
        return (self.nir / self.redEdge) - 1
    def ci( self ):
        '''simple docstring'''
        return (self.red - self.blue) / self.red
    def ctvi( self ):
        '''simple docstring'''
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self ):
        '''simple docstring'''
        return self.nir - self.green
    def evi( self ):
        '''simple docstring'''
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self ):
        '''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self , y=0.16 ):
        '''simple docstring'''
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
        '''simple docstring'''
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self ):
        '''simple docstring'''
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
        '''simple docstring'''
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        '''simple docstring'''
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self ):
        '''simple docstring'''
        return (self.red + self.green + self.blue) / 30.5
    def rvi( self ):
        '''simple docstring'''
        return self.nir / self.red
    def mrvi( self ):
        '''simple docstring'''
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self ):
        '''simple docstring'''
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self ):
        '''simple docstring'''
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self ):
        '''simple docstring'''
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self ):
        '''simple docstring'''
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self ):
        '''simple docstring'''
        return (self.green - self.red) / (self.green + self.red)
    def ri( self ):
        '''simple docstring'''
        return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        '''simple docstring'''
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self ):
        '''simple docstring'''
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self ):
        '''simple docstring'''
        return self.nir / self.red
    def tvi( self ):
        '''simple docstring'''
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self ):
        '''simple docstring'''
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
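# Hedged usage sketch for the class above; the band matrices are illustrative
# reflectance values, not real data.
if __name__ == "__main__":
    bands = IndexCalculation(
        red=np.array([[0.11, 0.09], [0.22, 0.17]] ) , nir=np.array([[0.70, 0.66], [0.81, 0.58]] ) )
    print(bands.calculation('NDVI' ) )  # elementwise (nir - red) / (nir + red)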
| 284 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig ( PretrainedConfig ):
    model_type = '''convbert'''
    def __init__(self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_6_8 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig ( OnnxConfig ):
    @property
    def inputs(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
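# Hedged sanity check for the configuration above: the registered model_type
# round-trips with the default constructor.
if __name__ == "__main__":
    assert ConvBertConfig().model_type == "convbert"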
| 132 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        model = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        model = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
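        # Hedged note: per the upstream test this expected value is the summed token
        # log-likelihood, originally computed with the Mesh TensorFlow reference MT5.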
| 71 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __UpperCamelCase ( unittest.TestCase ):
def a__ ( self :Dict ):
        act = get_activation("""swish""" )
self.assertIsInstance(_UpperCamelCase ,nn.SiLU )
self.assertEqual(act(torch.tensor(-1_0_0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(2_0 ,dtype=torch.floataa ) ).item() ,2_0 )
def a__ ( self :Optional[int] ):
snake_case_ : Optional[Any] = get_activation("""silu""" )
self.assertIsInstance(_UpperCamelCase ,nn.SiLU )
self.assertEqual(act(torch.tensor(-1_0_0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(2_0 ,dtype=torch.floataa ) ).item() ,2_0 )
def a__ ( self :Optional[int] ):
snake_case_ : Optional[Any] = get_activation("""mish""" )
self.assertIsInstance(_UpperCamelCase ,nn.Mish )
self.assertEqual(act(torch.tensor(-2_0_0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(2_0 ,dtype=torch.floataa ) ).item() ,2_0 )
def a__ ( self :str ):
        act = get_activation("""gelu""" )
self.assertIsInstance(_UpperCamelCase ,nn.GELU )
self.assertEqual(act(torch.tensor(-1_0_0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
        self.assertEqual(act(torch.tensor(2_0 ,dtype=torch.floataa ) ).item() ,2_0 )
| 334 |
'''simple docstring'''
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def UpperCAmelCase ( lowerCamelCase_ :float ):
    '''simple docstring'''
    assert type(lowerCamelCase_ ) in (int, float) and lowerCamelCase_ == int(lowerCamelCase_ )
    decimal = int(lowerCamelCase_ )
    hexadecimal = """"""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = """0x""" + hexadecimal
    if negative:
        hexadecimal = """-""" + hexadecimal
    return hexadecimal
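# Hedged usage sketch for the converter above; note the 0 edge case, which skips
# the loop entirely and yields just the prefix "0x".
assert UpperCAmelCase(255 ) == "0xff"
assert UpperCAmelCase(-256 ) == "-0x100"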
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 334 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Any = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = """xlm-roberta-xl"""
    def __init__( self , vocab_size=2_5_0_8_8_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_6 , num_attention_heads=3_2 , intermediate_size=1_0_2_4_0 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_4 , type_vocab_size=1 , initializer_range=0.0_2 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig ( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
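# Hedged sanity check for the configuration above (mirrors the model_type that
# transformers registers for XLM-RoBERTa-XL).
if __name__ == "__main__":
    assert XLMRobertaXLConfig().model_type == "xlm-roberta-xl"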
| 502 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator ( ):
    """simple docstring"""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution ( n : int = 10_00 ):
    """simple docstring"""
    answer = 1
    fib = fibonacci_generator()
    while len(str(next(fib ) ) ) < n:
        answer += 1
    return answer + 1
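# Hedged sanity check (this snippet matches Project Euler problem 25, the index of
# the first Fibonacci term with n digits): the first three-digit term is F(12) = 144.
assert solution(3 ) == 12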
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 502 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class A_ ( TrainingArguments ):
"""simple docstring"""
    sortish_sampler : bool = field(default=False , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
    predict_with_generate : bool = field(
        default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    generation_max_length : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `max_length` value of the model configuration.'''
            )
        } , )
    generation_num_beams : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
                '''to the `num_beams` value of the model configuration.'''
            )
        } , )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
        } , )
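    # Hedged usage sketch: with the field names above this mirrors
    # `transformers.Seq2SeqTrainingArguments`; constructed, e.g., as
    #   A_(output_dir='./out' , predict_with_generate=True , generation_num_beams=4 )
    # where the output_dir value is an illustrative assumption.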
    def to_dict( self ) -> Any:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
| 67 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory ( args: Namespace ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
    """simple docstring"""
    @staticmethod
    def register_subcommand( parser ):
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing', action='store_true', help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file', type=str, help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path', type=str, help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self, testing, testing_file, path=None, *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
SCREAMING_SNAKE_CASE_ = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(_lowercase ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
SCREAMING_SNAKE_CASE_ = (
Path(_lowercase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
SCREAMING_SNAKE_CASE_ = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(_lowercase ) )
else:
with open(self._testing_file, 'r' ) as configuration_file:
SCREAMING_SNAKE_CASE_ = json.load(_lowercase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ), no_input=_lowercase, extra_context=_lowercase, )
SCREAMING_SNAKE_CASE_ = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json', 'r' ) as configuration_file:
SCREAMING_SNAKE_CASE_ = json.load(_lowercase )
SCREAMING_SNAKE_CASE_ = configuration['lowercase_modelname']
SCREAMING_SNAKE_CASE_ = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f"""{directory}/configuration.json""" )
SCREAMING_SNAKE_CASE_ = 'PyTorch' in generate_tensorflow_pytorch_and_flax
SCREAMING_SNAKE_CASE_ = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
SCREAMING_SNAKE_CASE_ = 'Flax' in generate_tensorflow_pytorch_and_flax
SCREAMING_SNAKE_CASE_ = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(_lowercase, exist_ok=_lowercase )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""", exist_ok=_lowercase )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""", 'w' ):
pass
shutil.move(
f"""{directory}/__init__.py""", f"""{model_dir}/__init__.py""", )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""", f"""{model_dir}/configuration_{lowercase_model_name}.py""", )
def remove_copy_lines(_lowercase ):
with open(_lowercase, 'r' ) as f:
SCREAMING_SNAKE_CASE_ = f.readlines()
with open(_lowercase, 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(_lowercase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""", f"""{model_dir}/modeling_{lowercase_model_name}.py""", )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""", f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""", )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""", f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""", )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""", f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""", )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""", f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""", )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""", f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""", )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""", f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""", )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""", f"""{model_dir}/tokenization_{lowercase_model_name}.py""", )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""", f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""", )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(_lowercase, _lowercase, _lowercase ):
# Create temp file
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = mkstemp()
SCREAMING_SNAKE_CASE_ = False
with fdopen(_lowercase, 'w' ) as new_file:
with open(_lowercase ) as old_file:
for line in old_file:
new_file.write(_lowercase )
if line_to_copy_below in line:
SCREAMING_SNAKE_CASE_ = True
for line_to_copy in lines_to_copy:
new_file.write(_lowercase )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(_lowercase, _lowercase )
# Remove original file
remove(_lowercase )
# Move new file
move(_lowercase, _lowercase )
def skip_units(_lowercase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(_lowercase ):
with open(_lowercase ) as datafile:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
SCREAMING_SNAKE_CASE_ = line.split('"' )[1]
SCREAMING_SNAKE_CASE_ = skip_units(_lowercase )
elif "# Below: " in line and "##" not in line:
SCREAMING_SNAKE_CASE_ = line.split('"' )[1]
SCREAMING_SNAKE_CASE_ = skip_units(_lowercase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(_lowercase, _lowercase, _lowercase )
SCREAMING_SNAKE_CASE_ = []
elif "# Replace with" in line and "##" not in line:
SCREAMING_SNAKE_CASE_ = []
elif "##" not in line:
lines_to_copy.append(_lowercase )
remove(_lowercase )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(_lowercase )
| 294 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config ( model_name ):
    '''simple docstring'''
    backbone_config = SwinConfig(
        embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 3_66
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys ( config ):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key ( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v ( state_dict , backbone_config ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v ( state_dict , config ):
    '''simple docstring'''
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[F'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[F'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[F'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[F'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img ( ):
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def _snake_case ( _snake_case : Optional[int] , _snake_case : Any , _snake_case : Tuple ) -> Dict:
'''simple docstring'''
_A = get_deta_config(_snake_case )
# load original state dict
if model_name == "deta-swin-large":
_A = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_A = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
_A = torch.load(_snake_case , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(_snake_case , param.shape )
# rename keys
_A = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
read_in_swin_q_k_v(_snake_case , config.backbone_config )
read_in_decoder_q_k_v(_snake_case , _snake_case )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_A = state_dict.pop(_snake_case )
_A = val
if "input_proj" in key:
_A = state_dict.pop(_snake_case )
_A = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_A = state_dict.pop(_snake_case )
_A = val
# finally, create HuggingFace model and load state dict
_A = DetaForObjectDetection(_snake_case )
model.load_state_dict(_snake_case )
model.eval()
_A = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(_snake_case )
# load image processor
_A = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
_A = prepare_img()
_A = processor(images=_snake_case , return_tensors='pt' )
_A = encoding['pixel_values']
_A = model(pixel_values.to(_snake_case ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_A = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_A = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_A = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_A = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_snake_case ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_snake_case ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
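# Hedged invocation sketch for the script above (the dump path is illustrative):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub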
| 505 |
"""simple docstring"""
def base16_encode ( data : bytes ) -> str:
    '''simple docstring'''
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode ( data : str ) -> bytes:
    '''simple docstring'''
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
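# Hedged round-trip check for the two helpers above:
assert base16_encode(b'Hello' ) == '48656C6C6F'
assert base16_decode('48656C6C6F' ) == b'Hello'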
if __name__ == "__main__":
import doctest
doctest.testmod()
| 505 | 1 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels ( ):
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
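# Hedged usage sketch: the loader above JIT-compiles the extension on first use, so
# callers typically guard it and fall back to a pure-PyTorch path on failure.
try:
    MultiScaleDeformableAttention = load_cuda_kernels()
except Exception:
    MultiScaleDeformableAttention = None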
| 490 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder ( nn.Module ):
    """simple docstring"""
    def __init__( self , args ):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self , input_modal ):
        '''simple docstring'''
        out = self.pool(self.model(input_modal ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset ( Dataset ):
    """simple docstring"""
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        '''simple docstring'''
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self : Dict ):
'''simple docstring'''
return len(self.data )
    def __getitem__( self , index ):
        '''simple docstring'''
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]['label']]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
        image = self.transforms(image )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies( self ):
        '''simple docstring'''
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'] )
        return label_freqs
def collate_fn (batch ):
    """simple docstring"""
    lens = [len(row['sentence'] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row['image'] for row in batch] )
    tgt_tensor = torch.stack([row['label'] for row in batch] )
    img_start_token = torch.stack([row['image_start_token'] for row in batch] )
    img_end_token = torch.stack([row['image_end_token'] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels ():
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms ():
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
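# Hedged usage sketch: the transform factory above maps any RGB PIL image to a
# normalized 3x224x224 float tensor, e.g.
#   get_image_transforms()(Image.new('RGB', (256, 256))).shape == torch.Size([3, 224, 224])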
| 229 | 0 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job ( job ):
    job_info = {}
    start = job['started_at']
    end = job['completed_at']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
def get_job_time ( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job['name']: extract_time_from_single_job(job ) for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" , headers=headers ).json()
            job_time.update({job['name']: extract_time_from_single_job(job ) for job in result['jobs']} )
        return job_time
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
        return {}
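# Hedged invocation sketch (needs a reachable GitHub API; the run id is illustrative):
#   python get_github_job_time.py --workflow_run_id 1234567890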
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"{k}: {v['duration']}") | 195 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
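
# The tester above only fabricates configs and dummy inputs; the shared
# FlaxModelTesterMixin consumed below calls prepare_config_and_inputs_for_common()
# to run the generic forward-pass and save/load checks for every class listed
# in all_model_classes.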
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 195 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
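
# Minimal usage sketch (hypothetical sizes): the config is a plain container,
# so it can be built and inspected without loading any model weights.
#
#     config = Data2VecTextConfig(vocab_size=1000, hidden_size=64)
#     assert config.model_type == "data2vec-text"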

class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 658 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer, backed by a byte-level BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
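
# Special-token layout produced by build_inputs_with_special_tokens above
# (bos and eos both default to "<|endoftext|>" for this tokenizer):
#   single sequence: <|endoftext|> A <|endoftext|>
#   pair:            <|endoftext|> A <|endoftext|> <|endoftext|> B <|endoftext|>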
| 84 | 0 |
'''simple docstring'''
def all_chars_unique(input_str: str) -> bool:
    """Return True if no character occurs twice, using one bit per code point."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
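
# Example: "abc" sets three distinct bits (97, 98, 99) and returns True,
# while "aba" hits bit 97 a second time and returns False. One bit per code
# point means the bitmap can grow large for exotic characters, but it stays
# exact, unlike a fixed-width 26-letter mask.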
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
import math
def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: all primes below n (odd-only inner loop)."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum of semidivisible numbers below `limit` (Project Euler 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
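
# Algorithm sketch: for each consecutive prime pair (p, q) the candidates
# live in (p**2, q**2); multiples of p are summed upward from p**2 and
# multiples of q downward from q**2, and numbers divisible by both (steps of
# p * q) are subtracted twice because each of the two passes counted them
# once even though "exactly one divisor" numbers are what we want.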
if __name__ == "__main__":
print(solution())
| 149 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """QA head plus a 5-way pooled classifier for the Natural Questions answer categories."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # Soft cross entropy between log-softmax(logits) and one-hot labels.
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
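
# Shape sketch (hypothetical sizes): start/end logits are (batch, seq_len)
# and pooled logits are (batch, 5); each label tensor holds integer indices
# that cross_entropy one-hot encodes on the fly, so the three heads share a
# single soft cross-entropy averaged with equal weight.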
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
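
# Collation sketch: every example is right-padded to max_length with pad_id
# (TPUs dislike dynamic shapes), and `shard` then splits the leading batch
# dimension across local devices, so Args.batch_size must be a multiple of
# jax.device_count().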
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = self.args
lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size
lowercase__ : int = jax.random.PRNGKey(0)
lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count())
for epoch in range(args.max_epochs):
lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa)
lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'):
lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
if i % args.logging_steps == 0:
lowercase__ : List[str] = jax_utils.unreplicate(state.step)
lowercase__ : str = running_loss.item() / i
lowercase__ : Tuple = self.scheduler_fn(state_step - 1)
lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(SCREAMING_SNAKE_CASE_))
self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size)
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size
lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa)
lowercase__ : Optional[Any] = 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """):
lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
running_loss += jax_utils.unreplicate(metrics["""loss"""])
i += 1
return running_loss / i
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """)
self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params)
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib"""))
joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib"""))
with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f:
json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_)
print("""DONE""")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # Decay every parameter except biases and LayerNorm scales; the mask
        # is keyed on the flattened parameter *path*, not on the array values.
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
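
# Schedule sketch: with the defaults above (init_lr=0.0, lr=3e-5,
# warmup_steps=20_000) the learning rate climbs linearly to 3e-5 during
# warmup, then decays linearly toward 1e-7 over the remaining
# num_train_steps - warmup_steps steps.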
| 12 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 12 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
| 382 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given two of resistance, reactance and impedance (the unknown passed as 0),
    solve for the missing quantity using |Z|**2 = R**2 + X**2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
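
# Example: a series circuit with R = 3 and X = 4 has |Z| = 5, so
# electrical_impedance(3, 4, 0) returns {"impedance": 5.0}, and
# electrical_impedance(0, 4, 5) recovers {"resistance": 3.0}.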
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 382 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/tweak the research checkpoint's weights into the RobertaPreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
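
# Key-mapping sketch: "roberta.encoder.layer.0.attention..." becomes
# "roberta_prelayernorm.encoder.layer.0.attention...", while the unused
# "*.self.LayerNorm.{weight,bias}" tensors from the research checkpoint are
# dropped before the weights are loaded into the new architecture.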
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 7 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 607 | 0 |
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True if every element of the collection appears exactly once."""
    return len(set(collection)) == len(collection)
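
# Examples: all_unique([1, 2, 3]) -> True; all_unique([1, 2, 2]) -> False.
# Elements must be hashable, since the check builds a set.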
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 718 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline using any model with an image-classification head."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
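
    # Pipeline flow sketch: __call__ routes each image through
    # preprocess -> _forward -> postprocess; _sanitize_parameters decides
    # which stage receives which kwarg (here only `top_k`, consumed by
    # postprocess below).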
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 307 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 476 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
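
# Composition sketch (hypothetical sizes): the top-level config nests the two
# sub-configs and mirrors the text model's special token ids.
#
#     config = Pix2StructConfig(text_config={"hidden_size": 64})
#     assert config.decoder_start_token_id == config.text_config.decoder_start_token_id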
| 476 | 1 |
'''simple docstring'''
def equation(x: float) -> float:
    """The function whose root we search for: f(x) = 10 - x**2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
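
# Convergence sketch: equation(x) = 10 - x**2 changes sign on both [-2, 5]
# and [0, 6], so the two demo calls below converge to the positive root
# +sqrt(10) ~= 3.162 within the 0.01 interval tolerance.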
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 438 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/nllb-large-en-ro': 1024,
    'facebook/nllb-200-distilled-600M': 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = 'eng_Latn',
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = 'fra_Latn',
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
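
# Token-layout sketch (default legacy_behaviour=False): source text encodes
# as [src_lang_code] ... tokens ... </s>; _switch_to_target_mode swaps the
# prefix to the target language code for labels, and _build_translation_inputs
# forces the target language at generation time via forced_bos_token_id,
# e.g. 'eng_Latn' -> 'fra_Latn' for English-to-French.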
| 438 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
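
# Shape sketch: downscale_height_and_width maps the requested pixel size to
# the latent grid, rounding up so it stays divisible by the movq scale
# factor, e.g. (768, 768) -> (96, 96) with scale_factor=8 and (500, 500)
# rounds up to (64, 64). prepare_image yields a (1, 3, h, w) float tensor
# scaled to [-1, 1], the range the MoVQ encoder expects.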
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Pipeline for image-to-image generation using Kandinsky 2.2."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
UpperCamelCase : str = self._execution_device
UpperCamelCase : Union[str, Any] = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE_, dim=0 )
UpperCamelCase : int = image_embeds.shape[0]
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = torch.cat(SCREAMING_SNAKE_CASE_, dim=0 )
if do_classifier_free_guidance:
UpperCamelCase : List[str] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0 )
UpperCamelCase : Optional[Any] = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0 )
UpperCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds], dim=0 ).to(dtype=self.unet.dtype, device=SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = [image]
if not all(isinstance(SCREAMING_SNAKE_CASE_, (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(SCREAMING_SNAKE_CASE_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
UpperCamelCase : Any = torch.cat([prepare_image(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) for i in image], dim=0 )
UpperCamelCase : List[str] = image.to(dtype=image_embeds.dtype, device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = self.movq.encode(SCREAMING_SNAKE_CASE_ )['latents']
UpperCamelCase : List[Any] = latents.repeat_interleave(SCREAMING_SNAKE_CASE_, dim=0 )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_, device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase : str = self.get_timesteps(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCamelCase , UpperCamelCase : Union[str, Any] = downscale_height_and_width(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.movq_scale_factor )
UpperCamelCase : Optional[int] = self.prepare_latents(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, image_embeds.dtype, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : List[Any] = {'image_embeds': image_embeds}
UpperCamelCase : Optional[Any] = self.unet(
sample=SCREAMING_SNAKE_CASE_, timestep=SCREAMING_SNAKE_CASE_, encoder_hidden_states=SCREAMING_SNAKE_CASE_, added_cond_kwargs=SCREAMING_SNAKE_CASE_, return_dict=SCREAMING_SNAKE_CASE_, )[0]
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : Tuple = noise_pred.split(latents.shape[1], dim=1 )
UpperCamelCase , UpperCamelCase : str = noise_pred.chunk(2 )
UpperCamelCase , UpperCamelCase : Any = variance_pred.chunk(2 )
UpperCamelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text], dim=1 )
if not (
hasattr(self.scheduler.config, 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase , UpperCamelCase : Any = noise_pred.split(latents.shape[1], dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Dict = self.scheduler.step(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, )[0]
# post-processing
UpperCamelCase : int = self.movq.decode(SCREAMING_SNAKE_CASE_, force_not_quantize=SCREAMING_SNAKE_CASE_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCamelCase : Dict = image * 0.5 + 0.5
UpperCamelCase : Optional[Any] = image.clamp(0, 1 )
UpperCamelCase : str = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase : Optional[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
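
# A quick, illustrative sanity check of the latent-size rounding performed by
# `downscale_height_and_width` above (assumes the default movq scale factor of 8;
# guarded so it only runs when this file is executed directly):
if __name__ == "__main__":
    assert downscale_height_and_width(768, 768, 8) == (96, 96)  # 768 // 64 = 12 -> 12 * 8 = 96
    assert downscale_height_and_width(770, 770, 8) == (104, 104)  # non-multiples of 64 round up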
| 40 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def A__ ( A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(A__ , config=A__ )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def A__ ( A__ , A__ , A__ ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(A__ , config=A__ )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def A__ ( A__ , A__ , A__ ) -> Any:
'''simple docstring'''
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(A__ , config=A__ )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
_UpperCAmelCase = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def A__ ( A__ , A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = torch.load(A__ , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(A__ )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
A__ , return_attention_mask=A__ , do_normalize=A__ )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(A__ , A__ , A__ )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(A__ , A__ , A__ )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(A__ , A__ , A__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(A__ )
hf_model.save_pretrained(A__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
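
# A hypothetical invocation of this conversion script (the file name, model name and
# paths below are illustrative placeholders, not tested values):
#
#   python convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model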
| 426 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__snake_case = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__snake_case = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`gpus` (int): Number of GPUs to use. If None, defaults to 1 when CUDA is available and 0 otherwise.
`progress_bar` (bool): Whether to display a progress bar while scoring. Defaults to False.
The COMET model itself is selected through the metric's config name (the default config loads `wmt20-comet-da`).
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 715 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : List[Any] = LayoutLMTokenizer
__UpperCAmelCase : List[Any] = LayoutLMTokenizerFast
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Dict = True
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
super().setUp()
snake_case : Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : str = "UNwant\u00E9d,running"
snake_case : Optional[int] = "unwanted, running"
return input_text, output_text
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : List[str] = self.tokenizer_class(self.vocab_file )
snake_case : Optional[Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(UpperCamelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
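
# This module is normally collected by the transformers test suite; a typical
# (hypothetical) way to run just these tokenizer tests from a checkout is:
#
#   pytest tests/models/layoutlm/test_tokenization_layoutlm.py -q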
| 117 | 0 |
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
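
# This script only exercises `gather_for_metrics` meaningfully when several processes
# are involved; a typical (hypothetical) invocation is:
#
#   accelerate launch --num_processes 2 test_metrics.py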
| 47 |
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """
    Calculate the date of Easter for a given year using Gauss's Easter algorithm.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 47 | 1 |
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Processes audio and text inputs, as well as audio and text targets.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """
        Collates audio and text inputs, as well as their targets, into a padded batch.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap in the mel-bin count so the feature extractor pads spectrogram targets correctly.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's `batch_decode`.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        Forwards all arguments to the tokenizer's `decode`.
        """
        return self.tokenizer.decode(*args, **kwargs)
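
# A minimal usage sketch (the checkpoint name is an assumption chosen for
# illustration; the snippet is not executed here):
#
#   from transformers import SpeechT5Processor
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
#   inputs = processor(audio=waveform, sampling_rate=16000, text_target="the transcript")
#   # inputs["input_values"] holds the audio features, inputs["labels"] the target token ids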
| 707 |
import unittest

from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
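
# The `@slow` integration test above is skipped by default; a typical (hypothetical)
# way to include it from a transformers checkout is:
#
#   RUN_SLOW=1 pytest tests/models/deberta_v2/test_modeling_deberta_v2.py -q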
| 473 | 0 |
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Set a given tensor (parameter or buffer) of a module on a specific device, quantizing it with `bitsandbytes`
    when the parameter is an `Int8Params` or `Params4bit` instance.
    """
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Recursively replace `nn.Linear`/`Conv1D` modules with their `bitsandbytes` counterparts.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    r"""
    Get the keys of the modules to keep in full precision, e.g. the `lm_head` of a CausalLM model,
    which is usually kept in full precision for numerical stability reasons.
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
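
# A minimal sketch of how these helpers fit together when quantizing a model by hand.
# Normally `from_pretrained(..., load_in_8bit=True)` drives this machinery internally;
# treat the snippet as illustrative, not a supported entry point (weights would still
# need to be reloaded onto the quantized modules afterwards):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#   modules_to_keep = get_keys_to_not_convert(model)  # e.g. ["lm_head"]
#   model = replace_with_bnb_linear(model, modules_to_keep, quantization_config=quantization_config)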
| 279 |
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        r"""Find all print statements."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
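
# Illustration of why `_no_print_statements` uses `finditer` plus a group check:
# commented-out or quoted `print(` calls match one of the ignored alternatives, so
# their capture group is None and only real statements are flagged (guarded demo):
if __name__ == "__main__":
    regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
    flagged = [m for m in regexp.finditer('# print("hi")\nprint("hi")') if m.group(1) is not None]
    assert len(flagged) == 1  # only the uncommented print statement is caught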
| 279 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__ ( UpperCamelCase_ ):
def __init__( self : str , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple=1_3 , _lowerCamelCase : List[str]=7 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : Optional[Any]=9_9 , _lowerCamelCase : str=3_2 , _lowerCamelCase : Optional[Any]=5 , _lowerCamelCase : List[Any]=4 , _lowerCamelCase : Optional[Any]=3_7 , _lowerCamelCase : Union[str, Any]="gelu" , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : Optional[int]=5_1_2 , _lowerCamelCase : Dict=1_6 , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : List[str]=0.02 , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : int="None" , _lowerCamelCase : Dict=3 , _lowerCamelCase : int=4 , _lowerCamelCase : List[Any]=None , ):
snake_case__ : List[str] = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : int = seq_length
snake_case__ : str = is_training
snake_case__ : Tuple = use_input_mask
snake_case__ : str = use_token_type_ids
snake_case__ : List[Any] = use_labels
snake_case__ : Dict = vocab_size
snake_case__ : List[str] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : Tuple = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : Tuple = max_position_embeddings
snake_case__ : Any = type_vocab_size
snake_case__ : Optional[Any] = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : str = num_choices
snake_case__ : Any = relative_attention
snake_case__ : Dict = position_biased_input
snake_case__ : int = pos_att_type
snake_case__ : Optional[Any] = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason='Model not available yet' )
    def test_inference_masked_lm(self):
        pass
@slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 701 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    >>> reverse_long_words("nohtyP is nohtyP")
    'Python is Python'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 303 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 82 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 1 |
'''simple docstring'''
from copy import deepcopy
class BinaryIndexedTree:
    """A Fenwick tree (binary indexed tree): O(log n) point updates and prefix sums."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from ``arr`` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add ``value`` to element ``index`` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set element ``index`` to ``value`` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Return the sum of the first ``right`` elements in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Return the sum over the half-open interval [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index ``i`` with inclusive prefix sum arr[0..i] <= value (non-negative elements)."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
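
# Minimal usage sketch (illustrative; not part of the original module): build a
# tree, take a prefix sum, rank-query, apply a point update, and query a range.
def _binary_indexed_tree_demo() -> None:
    tree = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 6       # 1 + 2 + 3
    assert tree.rank_query(6) == 2   # arr[0..2] sums to 6, arr[0..3] exceeds it
    tree.add(1, 10)                  # arr[1] += 10
    assert tree.query(1, 3) == 15    # 12 + 3
    assert tree.get(1) == 12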
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
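        # Worked example with the defaults above: image_size=30, patch_size=2
        # gives num_patches = 15**2 = 225; mask_ratio=0.6 then keeps
        # ceil(0.4 * 226) = 91 tokens (including [CLS]).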
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
    def test_determinism(self):
        pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
    def test_save_load_fast_init_from_base(self):
        pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
    def test_save_load_fast_init_to_base(self):
        pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
    def test_model_outputs_equivalence(self):
        pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
    def test_inference(self):
        # make random mask reproducible
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 196 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
@property
def snake_case_ ( self ):
return 32
@property
def snake_case_ ( self ):
return 32
@property
def snake_case_ ( self ):
return self.time_input_dim
@property
def snake_case_ ( self ):
return self.time_input_dim * 4
@property
def snake_case_ ( self ):
return 100
@property
def snake_case_ ( self ):
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__A )
@property
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__A )
@property
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
__a = UnCLIPTextProjModel(**__A )
return model
@property
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
__a = UNetaDConditionModel(**__A )
return model
@property
def snake_case_ ( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def snake_case_ ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
__a = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def snake_case_ ( self ):
__a = self.dummy_decoder
__a = self.dummy_text_proj
__a = self.dummy_text_encoder
__a = self.dummy_tokenizer
__a = self.dummy_super_res_first
__a = self.dummy_super_res_last
__a = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
__a = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
__a = CLIPImageProcessor(crop_size=32 , size=32 )
__a = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def snake_case_ ( self , __A , __A=0 , __A=True ):
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(__A )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def snake_case_ ( self ):
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
__a = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__a = self.get_dummy_inputs(__A , pil_image=__A )
__a = pipe(**__A )
__a = output.images
__a = self.get_dummy_inputs(__A , pil_image=__A )
__a = pipe(
**__A , return_dict=__A , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
__a = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__a = self.get_dummy_inputs(__A , pil_image=__A )
__a = pipe(**__A )
__a = output.images
__a = self.get_dummy_inputs(__A , pil_image=__A )
__a = pipe(
**__A , return_dict=__A , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
__a = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__a = self.get_dummy_inputs(__A , pil_image=__A )
__a = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
__a = pipe(**__A )
__a = output.images
__a = self.get_dummy_inputs(__A , pil_image=__A )
__a = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
__a = pipe(
**__A , return_dict=__A , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__a = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
@skip_mps
def snake_case_ ( self ):
__a = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__a = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__A , expected_max_diff=__A )
@skip_mps
def snake_case_ ( self ):
__a = torch_device == """cpu"""
__a = True
__a = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=__A , relax_max_difference=__A , additional_params_copy_to_batched_inputs=__A , )
def snake_case_ ( self ):
__a = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__a = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__A , additional_params_copy_to_batched_inputs=__A , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__A )
@skip_mps
def snake_case_ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def snake_case_ ( self ):
return super().test_save_load_local()
@skip_mps
def snake_case_ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 99 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
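
# Illustrative example (not part of the conversion logic below) of how the "*"
# wildcard in MAPPING is resolved: a fairseq key such as
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj"
# entry, the layer index "3" is recovered from the name, and the target key
# becomes "encoder.layers.3.attention.k_proj" with weight_type "weight".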
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCAmelCase__ = None
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase__ = True
elif name.split('.' )[0] == "proj":
lowerCAmelCase__ = fairseq_model.proj
lowerCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(snake_case__ )[0].split('.' )[-2]
lowerCAmelCase__ = mapped_key.replace('*' , snake_case__ )
if "weight_g" in name:
lowerCAmelCase__ = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase__ = 'weight_v'
elif "bias" in name:
lowerCAmelCase__ = 'bias'
elif "weight" in name:
lowerCAmelCase__ = 'weight'
else:
lowerCAmelCase__ = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = full_name.split('conv_layers.' )[-1]
lowerCAmelCase__ = name.split('.' )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCAmelCase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCAmelCase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowerCAmelCase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCAmelCase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case__ )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
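
# Example (illustrative): a fairseq dict file whose lines are "<token> <count>",
# e.g. "hello 123" followed by "world 77", yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.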
@torch.no_grad()
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = WavaVecaConfig.from_pretrained(snake_case__ )
lowerCAmelCase__ = SpeechaTextaConfig.from_pretrained(
snake_case__ , vocab_size=snake_case__ , decoder_layers=snake_case__ , do_stable_layer_norm=snake_case__ )
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowerCAmelCase__ = model[0].eval()
# set weights for wav2vec2 encoder
lowerCAmelCase__ = WavaVecaModel(snake_case__ )
lowerCAmelCase__ = recursively_load_weights_wavaveca(model.encoder , snake_case__ )
lowerCAmelCase__ = SpeechaTextaForCausalLM(snake_case__ )
lowerCAmelCase__ , lowerCAmelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ )
# set output linear layer
unexpected_keys.remove('embed_out' )
lowerCAmelCase__ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowerCAmelCase__ = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
lowerCAmelCase__ = False
# add projection layer
lowerCAmelCase__ = nn.Parameter(projection_layer.weight )
lowerCAmelCase__ = nn.Parameter(projection_layer.bias )
lowerCAmelCase__ = create_vocab_dict(snake_case__ )
with open(os.path.join(snake_case__ , 'vocab.json' ) , 'w' ) as fp:
json.dump(snake_case__ , snake_case__ )
lowerCAmelCase__ = SpeechaTextaTokenizer(os.path.join(snake_case__ , 'vocab.json' ) )
tokenizer.save_pretrained(snake_case__ )
lowerCAmelCase__ = hf_wavavec.config.to_dict()
lowerCAmelCase__ = tokenizer.pad_token_id
lowerCAmelCase__ = tokenizer.bos_token_id
lowerCAmelCase__ = tokenizer.eos_token_id
lowerCAmelCase__ = 'speech_to_text_2'
lowerCAmelCase__ = 'wav2vec2'
lowerCAmelCase__ = SpeechEncoderDecoderConfig.from_dict(snake_case__ )
hf_wavavec.save_pretrained(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
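    # Example invocation (illustrative; every path is a placeholder and the
    # script filename is assumed to be whatever this module is saved as):
    #   python convert_wav2vec2_seq2seq_checkpoint.py \
    #       --checkpoint_path /path/to/fairseq/wav2vec2_seq2seq.pt \
    #       --dict_path /path/to/dict.ltr.txt \
    #       --pytorch_dump_folder_path /path/to/output_dir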
| 193 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
# fmt: off
lowerCAmelCase : Dict = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 637 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
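
# Minimal usage sketch (illustrative; requires torch and transformers): step a
# linear warmup/decay schedule alongside AdamW and record the learning rates,
# mirroring what `unwrap_schedule` does for the tests below. With lr=10.0,
# warmup ramps 0.0 -> 10.0 over two steps, then decays linearly toward 0.
def _schedule_demo():
    model = nn.Linear(50, 50)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
    lrs = unwrap_schedule(scheduler, num_steps=10)
    assert lrs[0] == 0.0 and abs(lrs[2] - 10.0) < 1e-6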
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
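        # Editor note (an addition for clarity): with relative_step=False and
        # scale_parameter=False, Adafactor does not use its default time-dependent
        # relative step size and instead relies on the explicit lr passed above.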
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        common_kwargs = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=F'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    def __init__( self , fn ):
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 637 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
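# Editor sketch (an addition for illustration; it mirrors the itertools.product
# logic used further down in this script): how the two dimensions above expand
# into the 6 listed runs.
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   runs = [" ".join(parts).strip() for parts in itertools.product(*dims)]
#   # runs == ["--tf32 0", "--tf32 0 --fp16", "--tf32 0 --bf16",
#   #          "--tf32 1", "--tf32 1 --fp16", "--tf32 1 --bf16"]
#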
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("""nan""")
class Tee:
    def __init__(self , filename ):
        self.stdout = sys.stdout
        self.file = open(filename , '''a''' )
    def __getattr__(self , attr ):
        return getattr(self.stdout , attr )
    def write(self , msg ):
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(R'''^.*\r''' , '''''' , msg , 0 , re.M ) )
def get_original_command(max_width=80 , full_python_path=False ):
    cmd = []
    # deal with critical env vars
    env_keys = ['''CUDA_VISIBLE_DEVICES''']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(F'''{key}={val}''' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''''''
    while len(cmd ) > 0:
        current_line += F'''{cmd.pop(0 )} '''
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''''''
    return "\\\n".join(lines )
def get_base_command(args , output_dir ):
    # unwrap multi-line input
    args.base_cmd = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(R'''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
    args.base_cmd += F''' --output_dir {output_dir}'''
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(R'''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single(id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep
        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print('''STDOUT''' , result.stdout )
        print('''STDERR''' , result.stderr )
    # save the streams
    prefix = variation.replace(''' ''' , '''-''' )
    with open(Path(output_dir ) / F'''log.{prefix}.stdout.txt''' , '''w''' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / F'''log.{prefix}.stderr.txt''' , '''w''' ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print('''failed''' )
        return {target_metric_key: nan}
    with io.open(F'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    results = []
    metrics = []
    preamble = F'''{id}: {variation:<{longest_variation_len}}'''
    outcome = F'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F'''\33[2K\r{outcome}'''
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = F'''{outcome} {mean_target}'''
        if len(results ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value ) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='''columns''' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='''columns''' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    df_console = df.rename(lambda c: c.replace('''_''' , '''\n''' ) , axis='''columns''' )
    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='''.2f''' )]
    print('''\n\n'''.join(report ) )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=None , type=str , required=True , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=None , type=str , nargs='''+''' , required=True , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=None , type=str , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=None , type=str , required=True , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=str , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=int , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=str , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=False , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(R'''\|''' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(''' '''.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(F'''and this script\'s output is also piped into {report_fn}''' )
    sys.stdout = Tee(report_fn )
    print(F'''\n*** Running {len(variations )} benchmarks:''' )
    print(F'''Base command: {" ".join(base_cmd )}''' )
    variation_key = '''variation'''
    results = []
    for id, variation in enumerate(tqdm(variations , desc='''Total completion: ''' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
    main()
| 317 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
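# Hedged usage sketch (editor addition; the script filename and the file paths
# are illustrative, while the flag names match the argparse definitions below):
#
#   python convert_bert_tf_checkpoint.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin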
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 317 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_fnet"""] = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_fnet_fast"""] = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_fnet"""] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
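# Editor note (an addition for clarity, describing the pattern above rather than
# new behavior): replacing the module object with a _LazyModule means the heavy
# submodules (e.g. the torch-backed modeling code) are only imported on first
# attribute access, which keeps the top-level package import fast.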
| 49 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    def test_text_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
    def test_iterator_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
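        # Editor note (an addition for clarity): TextIteratorStreamer pushes decoded
        # text chunks onto an internal queue, so generation has to run in a background
        # thread while the main thread consumes the iterator, as done above.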
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout( self ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer , timeout=0.001 )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ''''''
            for new_text in streamer:
                streamer_text += new_text
| 49 | 1 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
A : Any = None
A : str = {
'7B': 1_1_0_0_8,
'13B': 1_3_8_2_4,
'30B': 1_7_9_2_0,
'65B': 2_2_0_1_6,
'70B': 2_8_6_7_2,
}
A : Optional[Any] = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n , ffn_dim_multiplier=1 , multiple_of=256 ):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
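# Editor sanity check (an addition for illustration): with the defaults, LLaMA-7B's
# dim=4096 reproduces the 11008 intermediate size listed in the table at the top
# of this file.
def _check_intermediate_size_7b():
    assert compute_intermediate_size(4096 ) == 11008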
def read_json(path ):
    with open(path , '''r''' ) as f:
        return json.load(f )
def write_json(text , path ):
    with open(path , '''w''' ) as f:
        json.dump(text , f )
def write_model(model_path , input_base_path , model_size , safe_serialization=True ):
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , '''tmp''' )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , '''params.json''' ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['''n_layers''']
    n_heads = params['''n_heads''']
    n_heads_per_shard = n_heads // num_shards
    dim = params['''dim''']
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params['''n_kv_heads''']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
# permute for sliced rotary
    def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
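    # Editor note (an addition for clarity): the original LLaMA checkpoints store
    # the rotary (real, imag) components of each head interleaved, while the HF
    # implementation expects the two halves of each head split apart; the
    # view/transpose/reshape above performs that reordering on wq/wk.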
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__a = torch.load(os.path.join(a__ , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
__a = [
torch.load(os.path.join(a__ , F"""consolidated.{i:02d}.pth""" ) , map_location='''cpu''' )
for i in range(a__ )
]
__a = 0
__a = {'''weight_map''': {}}
for layer_i in range(a__ ):
__a = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
__a = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
__a = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
__a = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(a__ , a__ , a__ )
for i in range(a__ )
] , dim=0 , ).reshape(a__ , a__ ) )
__a = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
a__ , a__ , a__ )
for i in range(a__ )
] , dim=0 , ).reshape(a__ , a__ ) , a__ , a__ , a__ , )
__a = torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
a__ , a__ , a__ )
for i in range(a__ )
] , dim=0 , ).reshape(a__ , a__ )
__a = torch.cat(
[loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(a__ )] , dim=1 )
__a = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(a__ )] , dim=0 )
__a = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(a__ )] , dim=1 )
__a = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(a__ )] , dim=0 )
__a = inv_freq
for k, v in state_dict.items():
__a = filename
param_count += v.numel()
torch.save(a__ , os.path.join(a__ , a__ ) )
__a = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
__a = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
__a = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(a__ )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(a__ )] , dim=0 ),
}
for k, v in state_dict.items():
__a = filename
param_count += v.numel()
torch.save(a__ , os.path.join(a__ , a__ ) )
# Write configs
__a = {'''total_size''': param_count * 2}
write_json(a__ , os.path.join(a__ , '''pytorch_model.bin.index.json''' ) )
__a = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
__a = params['''multiple_of'''] if '''multiple_of''' in params else 256
__a = LlamaConfig(
hidden_size=a__ , intermediate_size=compute_intermediate_size(a__ , a__ , a__ ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=a__ , )
config.save_pretrained(a__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('''Saving in the Transformers format.''' )
    model.save_pretrained(model_path , safe_serialization=safe_serialization )
    shutil.rmtree(tmp_model_path )
def write_tokenizer(tokenizer_path , input_tokenizer_path ):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
    parser.add_argument(
        '''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
    parser.add_argument(
        '''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
    parser.add_argument('''--safe_serialization''' , type=bool , help='''Whether or not to save using `safetensors`.''' )
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , '''tokenizer.model''' )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
    main()
| 219 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase ):
    def __init__(self , parent ):
        self.parent = parent
    def prepare_feat_extract_dict(self ):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin , unittest.TestCase ):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
    def setUp(self ):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )
    @property
    def feat_extract_dict(self ):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self ):
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string )
        # fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings )
        # fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
| 219 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = (IPNDMScheduler,)
_lowercase : int = (('''num_inference_steps''', 5_0),)
    def get_scheduler_config(self , **kwargs ):
        config = {"""num_train_timesteps""": 1_000}
        config.update(**kwargs )
        return config
    def check_over_configs(self , time_step=0 , **kwargs ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _lowercase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**_lowercase )
_lowerCAmelCase = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[:]
if time_step is None:
_lowerCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowercase )
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self ):
        pass
    def check_over_forward(self , time_step=0 , **kwargs ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _lowercase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[:]
if time_step is None:
_lowerCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self , **config ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_lowercase )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowercase , _lowercase )
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowercase , _lowercase )
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
return sample
    def test_step_shape(self ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _lowercase )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , """set_timesteps""" ):
scheduler.set_timesteps(_lowercase )
elif num_inference_steps is not None and not hasattr(_lowercase , """set_timesteps""" ):
_lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.timesteps[5]
_lowerCAmelCase = scheduler.timesteps[6]
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
    def test_timesteps(self ):
"""simple docstring"""
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase , time_step=_lowercase )
    def test_inference_steps(self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_lowercase , time_step=_lowercase )
    def test_full_loop_no_noise(self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 162 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs ):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class PairClassificationPipeline(Pipeline ):
    def _sanitize_parameters(self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["""second_text"""] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}
    def preprocess(self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward(self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess(self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
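# Hedged usage sketch (editor addition): registering the pipeline above through
# the public custom-pipeline API; the task name and model class choice are
# illustrative assumptions, not taken from this file.
#
#   from transformers import AutoModelForSequenceClassification
#   from transformers.pipelines import PIPELINE_REGISTRY
#
#   PIPELINE_REGISTRY.register_pipeline(
#       "pair-classification",
#       pipeline_class=PairClassificationPipeline,
#       pt_model=AutoModelForSequenceClassification,
#   )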
| 162 | 1 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
a_ = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=16 , n_train_batches=10 , n_valid_batches=2 ):
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
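# Editor note (an addition for clarity): `accelerator.backward(loss)` is used in
# place of `loss.backward()` so Accelerate can transparently apply loss scaling
# under mixed precision and defer to any wrapped engine's backward pass.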
class DummyModel(nn.Module ):
    def __init__( self ):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward( self , x ):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase ):
def UpperCamelCase__( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : List[Any] = DummyModel()
__A : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : Union[str, Any] = dummy_dataloaders()
__A : Optional[Any] = ProjectConfiguration(total_limit=1 , project_dir=_lowerCamelCase , automatic_checkpoint_naming=_lowerCamelCase )
# Train baseline
__A : List[str] = Accelerator(project_config=_lowerCamelCase )
__A , __A , __A , __A : Optional[Any] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCamelCase__( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : Tuple = DummyModel()
__A : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : List[str] = dummy_dataloaders()
# Train baseline
__A : str = Accelerator()
__A , __A , __A , __A : str = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save initial
__A : Optional[int] = os.path.join(_lowerCamelCase , '''initial''' )
accelerator.save_state(_lowerCamelCase )
((__A) , (__A)) : Tuple = model.a.item(), model.b.item()
__A : Union[str, Any] = optimizer.state_dict()
__A : str = train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((__A) , (__A)) : str = model.a.item(), model.b.item()
__A : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
__A : List[str] = DummyModel()
__A : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : List[str] = dummy_dataloaders()
__A : Tuple = Accelerator()
__A , __A , __A , __A : int = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.load_state(_lowerCamelCase )
((__A) , (__A)) : List[str] = model.a.item(), model.b.item()
__A : List[str] = optimizer.state_dict()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
__A : Optional[int] = train(2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save everything
__A : int = os.path.join(_lowerCamelCase , '''checkpoint''' )
accelerator.save_state(_lowerCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_lowerCamelCase )
test_rands += train(1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((__A) , (__A)) : str = model.a.item(), model.b.item()
__A : List[str] = optimizer.state_dict()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : str = DummyModel()
__A : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : Any = dummy_dataloaders()
__A : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase )
# Train baseline
__A : List[Any] = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase )
__A , __A , __A , __A : Dict = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save initial
accelerator.save_state()
((__A) , (__A)) : Union[str, Any] = model.a.item(), model.b.item()
__A : Union[str, Any] = optimizer.state_dict()
__A : Tuple = train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((__A) , (__A)) : str = model.a.item(), model.b.item()
__A : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
__A : int = DummyModel()
__A : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : Any = dummy_dataloaders()
__A : Union[str, Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowerCamelCase )
__A : Union[str, Any] = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase )
__A , __A , __A , __A : Tuple = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.load_state(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) )
((__A) , (__A)) : Union[str, Any] = model.a.item(), model.b.item()
__A : Optional[Any] = optimizer.state_dict()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
__A : Tuple = train(2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((__A) , (__A)) : List[str] = model.a.item(), model.b.item()
__A : List[str] = optimizer.state_dict()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = torch.tensor([1, 2, 3] )
__A : int = torch.tensor([2, 3, 4] )
__A : Dict = DummyModel()
__A : Optional[Any] = torch.optim.Adam(net.parameters() )
__A : Tuple = Accelerator()
with self.assertRaises(_lowerCamelCase ) as ve:
accelerator.register_for_checkpointing(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__A : Union[str, Any] = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def UpperCamelCase__( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : int = DummyModel()
__A : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A : Union[str, Any] = torch.optim.lr_scheduler.StepLR(_lowerCamelCase , step_size=1 , gamma=0.9_9 )
__A , __A : int = dummy_dataloaders()
__A : str = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase )
# Train baseline
__A : Optional[int] = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase )
__A , __A , __A , __A , __A : Union[str, Any] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save initial
accelerator.save_state()
__A : List[Any] = scheduler.state_dict()
train(3 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertNotEqual(_lowerCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(_lowerCamelCase , scheduler.state_dict() )
def UpperCamelCase__( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : Any = DummyModel()
__A : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=_lowerCamelCase , total_limit=2 )
# Train baseline
__A : List[Any] = Accelerator(project_dir=_lowerCamelCase , project_config=_lowerCamelCase )
__A : Optional[int] = accelerator.prepare(_lowerCamelCase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
a_ = """/tmp/accelerate/state_checkpointing"""
a_ = DummyModel()
a_ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
a_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
a_ , a_ = dummy_dataloaders()
a_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
a_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
a_ , a_ , a_ , a_ , a_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
a_ , a_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
a_ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
a_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
a_ = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
a_ = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
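    # Note: the @require_cuda test above relaunches this same file under torchrun,
    # so this __main__ block runs once per GPU process.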
"""Collection of prime-number and number-theory helper functions."""

from math import sqrt


def is_prime(number: int) -> bool:
    """Returns True if 'number' is a prime number, otherwise False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n: int) -> list[int]:
    """Sieve of Eratosthenes: returns all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n: int) -> list[int]:
    """Returns all primes between 2 and n using trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number: int) -> list[int]:
    """Returns the prime factorization of 'number' as a list of factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returned of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    """Returns the greatest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Returns the smallest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number: int) -> bool:
    """Returns True if 'number' is even, otherwise False."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Returns True if 'number' is odd, otherwise False."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list[int]:
    """Goldbach's assumption: returns two primes whose sum is the even 'number' > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of 'number1' and 'number2', built from their prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Returns the n-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    """Returns all primes strictly between the primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list[int]:
    """Returns all divisors of n, including 1 and n itself."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number: int) -> bool:
    """Returns True if the sum of the proper divisors of 'number' equals 'number'."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    """Reduces numerator/denominator by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Returns n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans
def fib(n: int) -> int:
    """Returns the n-th term of the Fibonacci sequence, with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
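

# A quick sanity check for the helpers above (added for illustration; the
# expected values follow directly from the definitions):
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(12)
    assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert gcd(24, 36) == 12 and kg_v(4, 6) == 12
    assert goldbach(28) == [5, 23]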
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system matrix * x = vector by Gaussian elimination with
    partial pivoting, returning x as a column vector.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the input into an augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(col, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Returns a polynomial function that passes exactly through the points
    (1, y_list[0]), (2, y_list[1]), ... by solving for its coefficients.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating polynomial u(n) = 1 - n + n^2 - n^3 + ... + n^10."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Fits polynomials to the first 1, 2, ..., `order` terms of the sequence
    generated by `func` and sums each polynomial's first incorrect term.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
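

# Minimal usage sketch (mirrors the examples in _KWARGS_DESCRIPTION above):
# chrf = datasets.load_metric("chrf")
# results = chrf.compute(
#     predictions=["hello there"], references=[["hello there general kenobi"]], word_order=2
# )
# print(results["score"])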
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed of three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
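

# Minimal usage sketch (the checkpoint id comes from the docstring constants above):
# from transformers import AutoImageProcessor
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# logits = model(**processor(images=image, return_tensors="tf")).logits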
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """
    Builds a GHZ entangling circuit on `qubits` qubits, measures every qubit,
    and returns the measurement counts over 1000 simulated shots.
    """
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
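
# For 3 qubits the circuit prepares a GHZ state, so over 1000 shots the counts
# concentrate on the all-zeros and all-ones bitstrings, roughly
# {'000': ~500, '111': ~500} (exact numbers vary run to run).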
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
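
    # Note: onnxruntime accepts a (provider_name, provider_options) tuple, which is
    # exactly what `gpu_provider` returns; `enable_mem_pattern` is disabled so the
    # session honors the CUDA arena configuration above.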
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None,
            provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None,
            provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None,
            provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
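# A minimal sketch of what the lazy structure above buys (the import path is
# assumed from the module name and is not part of the file itself): importing
# the package is cheap, and torch/flax are only pulled in when the matching
# attribute is first touched.
#
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderConfig
#   # the line above only triggers the configuration submodule; the torch and
#   # flax model submodules stay unimported until their classes are accessed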
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}")
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
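# Hedged usage sketch, not part of the script itself: the flags come from the
# argparse definitions above, while the script filename and the checkpoint
# shortcut "bert-base-cased" are only assumed examples.
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased \
#       --config_file bert-base-cased \
#       --tf_dump_path ./tf_dump \
#       --compare_with_pt_model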
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
"""simple docstring"""
lowerCAmelCase__ : int = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase__ : List[Any] = self.get_eval_dataloader(UpperCamelCase )
lowerCAmelCase__ : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase__ : str = self.compute_metrics
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ : Optional[int] = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , )
finally:
lowerCAmelCase__ : int = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
lowerCAmelCase__ : Optional[int] = self.post_process_function(UpperCamelCase , UpperCamelCase , output.predictions )
lowerCAmelCase__ : Union[str, Any] = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCAmelCase__ : Union[str, Any] = metrics.pop(UpperCamelCase )
self.log(UpperCamelCase )
else:
lowerCAmelCase__ : str = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCAmelCase__ : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase__ : List[Any] = self.compute_metrics
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ : Union[str, Any] = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , )
finally:
lowerCAmelCase__ : str = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase__ : Tuple = self.post_process_function(UpperCamelCase , UpperCamelCase , output.predictions , """predict""" )
lowerCAmelCase__ : Optional[int] = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCAmelCase__ : Dict = metrics.pop(UpperCamelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : List[str]="./" ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.eval_dataset
lowerCAmelCase__ : Tuple = self.get_eval_dataloader(UpperCamelCase )
lowerCAmelCase__ : Any = next(iter(UpperCamelCase ) )
# saving device - to make it consistent
lowerCAmelCase__ : Tuple = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
lowerCAmelCase__ : str = tuple(v.to(UpperCamelCase ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : str = self.model.to(UpperCamelCase )
model.eval()
model.float()
lowerCAmelCase__ : List[Any] = model.module if hasattr(UpperCamelCase , """module""" ) else model
quant_trainer.configure_model(UpperCamelCase , self.quant_trainer_args )
lowerCAmelCase__ : int = os.path.join(UpperCamelCase , """model.onnx""" )
logger.info(f"""exporting model to {output_model_file}""" )
lowerCAmelCase__ : Union[str, Any] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
UpperCamelCase , UpperCamelCase , UpperCamelCase , export_params=UpperCamelCase , opset_version=13 , do_constant_folding=UpperCamelCase , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=UpperCamelCase , )
logger.info("""onnx export finished""" )
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
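# Hedged illustration of how the table above is typically used (the `session`
# variable is assumed): ONNX Runtime reports tensor types as strings such as
# "tensor(float)", which the map converts to NumPy dtypes.
#
#   out = session.get_outputs()[0]           # an onnxruntime NodeArg
#   np_dtype = ORT_TO_NP_TYPE[out.type]      # e.g. np.float32 for "tensor(float)"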
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session, falling back to the CPU provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        """Copy the latest model file (and any external weights) into `save_directory`."""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        """Save the model to a directory so it can be re-loaded with `from_pretrained`."""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls, model_id: Union[str, Path], use_auth_token=None, revision=None, force_download: bool = False,
        cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
@classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir,
            force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
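# Hedged usage sketch (the paths are assumed): load_model above is a thin
# wrapper over ort.InferenceSession, so a local ONNX file can be wrapped as
#
#   sess = OnnxRuntimeModel.load_model("unet/model.onnx", provider="CPUExecutionProvider")
#   wrapped = OnnxRuntimeModel(model=sess, model_save_dir="unet")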
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a_ : Union[str, Any] = FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
a_ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCAmelCase_ , os.listdir(lowerCAmelCase_ )[0] , """snapshots""" ) )]
a_ : Any = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=lowerCAmelCase_ )
a_ : Optional[int] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : List[str] = jax.random.PRNGKey(0 )
a_ : Optional[Any] = 4
a_ : Dict = jax.device_count()
a_ : Any = num_samples * [prompt]
a_ : Optional[Any] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : str = replicate(lowerCAmelCase_ )
a_ : List[Any] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Tuple = shard(lowerCAmelCase_ )
a_ : Any = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
a_ : List[str] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCAmelCase_ ) == num_samples
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=lowerCAmelCase_ )
a_ : int = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : int = jax.random.PRNGKey(0 )
a_ : Any = 50
a_ : List[str] = jax.device_count()
a_ : Any = num_samples * [prompt]
a_ : List[str] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : int = replicate(lowerCAmelCase_ )
a_ : Optional[int] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : int = shard(lowerCAmelCase_ )
a_ : Any = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ )
a_ : Optional[int] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : List[Any] = jax.random.PRNGKey(0 )
a_ : str = 50
a_ : List[Any] = jax.device_count()
a_ : Optional[int] = num_samples * [prompt]
a_ : List[str] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : Optional[int] = replicate(lowerCAmelCase_ )
a_ : Dict = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Union[str, Any] = shard(lowerCAmelCase_ )
a_ : List[Any] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
a_ : Optional[Any] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : Optional[Any] = jax.random.PRNGKey(0 )
a_ : str = 50
a_ : Optional[int] = jax.device_count()
a_ : List[Any] = num_samples * [prompt]
a_ : List[Any] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : str = replicate(lowerCAmelCase_ )
a_ : Dict = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Optional[Any] = shard(lowerCAmelCase_ )
a_ : Optional[Any] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , )
a_ , a_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
a_ : Optional[Any] = scheduler.create_state()
a_ : Dict = scheduler_state
a_ : int = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : Tuple = jax.random.PRNGKey(0 )
a_ : Tuple = 50
a_ : int = jax.device_count()
a_ : Any = num_samples * [prompt]
a_ : Optional[int] = pipeline.prepare_inputs(lowerCAmelCase_ )
# shard inputs and rng
a_ : Dict = replicate(lowerCAmelCase_ )
a_ : List[Any] = jax.random.split(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : int = shard(lowerCAmelCase_ )
a_ : int = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = (
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
a_ : Optional[Any] = jax.device_count()
a_ : Optional[int] = num_samples * [prompt]
a_ : Optional[int] = jax.random.split(jax.random.PRNGKey(0 ) , lowerCAmelCase_ )
a_ , a_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ , )
a_ : Dict = replicate(lowerCAmelCase_ )
a_ : Any = pipeline.prepare_inputs(lowerCAmelCase_ )
a_ : Optional[int] = shard(lowerCAmelCase_ )
a_ : str = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
a_ , a_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase_ , use_memory_efficient_attention=lowerCAmelCase_ , )
a_ : Optional[Any] = replicate(lowerCAmelCase_ )
a_ : int = pipeline.prepare_inputs(lowerCAmelCase_ )
a_ : Optional[int] = shard(lowerCAmelCase_ )
a_ : Union[str, Any] = pipeline(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , jit=lowerCAmelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
        slice_eff = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
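# Hedged summary of the data-parallel pattern the tests above repeat
# (variable names assumed; one shard of the inputs per local device):
#
#   params = replicate(params)                                    # copy weights to every device
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids = shard(pipeline.prepare_inputs(num_samples * [prompt]))
#   images = pipeline(prompt_ids, params, rng, jit=True).images   # pmapped under the hood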
def reverse_words(input_str: str) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
    def run_large_test(self, unmasker):
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
    def run_test_targets(self, model, tokenizer):
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        """simple docstring"""
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}', top_k=2)
        self.assertEqual(
            outputs, [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ], )
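# For context, the behaviour exercised above in plain pipeline usage
# (a hedged sketch; the checkpoint name is illustrative, any masked-LM checkpoint works):
#
#   from transformers import pipeline
#   fill_masker = pipeline("fill-mask", model="distilroberta-base")
#   preds = fill_masker("This is a <mask>.", targets=[" test", " dog"], top_k=2)
#   for p in preds:
#       print(p["token_str"], round(p["score"], 4))  # scores are restricted to the given targets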
| 82 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        """simple docstring"""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
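# For reference, a sketch of the structure the assertions above rely on
# (the "copies"/"is_extreme" keys come from the assertions; other fields and
# all values are illustrative):
# duplicate_clusters == [
#     [
#         {"base_index": ..., "copies": 2, "is_extreme": True},
#         {"base_index": ..., "copies": 2, "is_extreme": False},
#     ],
# ]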
| 320 |
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,)

    def check_loss_output(self, result):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_sequence_classification_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        '''simple docstring'''
        pass

    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
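# To run only the slow integration test above (RUN_SLOW is the standard
# transformers opt-in for slow tests; the test file path is illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/deberta_v2/test_modeling_deberta_v2.py -k test_inference_no_head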
| 336 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None):
        '''simple docstring'''
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        '''simple docstring'''
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        '''simple docstring'''
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
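# A minimal usage sketch of the pipeline defined above (the checkpoint and
# image URL are illustrative):
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2)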
| 560 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """simple docstring"""
    return np.dot(vector, vector)
class SVC:
    """simple docstring"""

    def __init__(self, *, regularization=np.inf, kernel="linear", gamma=0.0):
        '''simple docstring'''
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            message = F"Unknown kernel: {kernel}"
            raise ValueError(message)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        '''simple docstring'''
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        '''simple docstring'''
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations, classes):
        '''simple docstring'''
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate):
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation):
        '''simple docstring'''
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
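# A small usage sketch of the SVC class above (the toy data is illustrative):
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.5]))  # expected to side with the +1 group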
| 560 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter):
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter=1000):
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 101 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,)
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",)
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",)
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.",)
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
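# A sketch of a typical invocation (the DeepSpeed plugin is assumed to be set up
# beforehand via `accelerate config`; the script filename is illustrative):
#   accelerate launch test_performance.py --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .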
| 376 | 0 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n    author = {Amit Bagga and Breck Baldwin},\n    title = {Algorithms for Scoring Coreference Chains},\n    booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n    year = {1998},\n    pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n    author = {Xiaoqiang Luo},\n    title = {On coreference resolution performance metrics},\n    booktitle = {In Proc. of HLT/EMNLP},\n    year = {2005},\n    pages = {25--32},\n    publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n    title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n    author = \"Moosavi, Nafise Sadat and\n      Strube, Michael\",\n    booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n    month = aug,\n    year = \"2016\",\n    address = \"Berlin, Germany\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/P16-1060\",\n    doi = \"10.18653/v1/P16-1060\",\n    pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn  Type    Description\n1   Document ID This is a variation on the document filename\n2   Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3   Word number\n4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5   Part-of-Speech\n6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7   Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8   Predicate Frameset ID   This is the PropBank frameset ID of the predicate in Column 7.\n9   Word sense  This is the word sense of the word in Column 3.\n10   Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11   Named Entities These columns identifies the spans representing various named entities.\n12:N   Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN   Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -',\n    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)      -',\n    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively")
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}",)
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }), codebase_urls=["https://github.com/ns-moosavi/coval"], reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],)

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
        # util.parse_key_file(key_file)
        # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span,)
        return score
| 465 |
| 465 | 1 |
cache = {}
def _calculate(days, absent, late):
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
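# Worked check (from the Project Euler 191 problem statement): over a 4-day
# period there are 3**4 = 81 possible strings, of which exactly 43 are prize strings:
#   assert _calculate(4, absent=0, late=0) == 43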
def solution(days=30):
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
    print(solution())
| 287 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
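# For example, a typical invocation (the script filename is illustrative; the
# `--gradient_accumulation_steps` flag is read via `args.gradient_accumulation_steps`
# in `training_function` below):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2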
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
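# Note added for clarity (not in the original script): under `accumulate`, the
# optimizer only steps every `gradient_accumulation_steps` batches, so the
# effective optimizer batch size is
#
#     batch_size * gradient_accumulation_steps * accelerator.num_processes
#
# e.g. batch_size=16 with --gradient_accumulation_steps 4 on a single GPU
# behaves like an optimizer batch of 64.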
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 180 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        # Stack a list of same-shape, same-dtype tensors into one tensor.
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
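# Usage sketch (illustrative; relies on the `datasets` plumbing that registers
# formatters by name). Calling `set_format("torch")` routes rows, columns and
# batches through a formatter like the one above, so indexing returns
# torch.Tensor objects:
#
#     from datasets import load_dataset
#     ds = load_dataset("glue", "mrpc", split="train")
#     ds.set_format("torch")
#     batch = ds[:4]  # dict of torch.Tensors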
| 702 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Prints a maximum-size set of mutually compatible activities, assuming
    the activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
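# Note added for clarity: the greedy choice in print_max_activities is only
# optimal when the activities are pre-sorted by finish time, as the demo data
# above is. For unsorted input, sort the pairs by finish time first, e.g.:
#
#     pairs = sorted(zip(start, finish), key=lambda p: p[1])
#     start, finish = map(list, zip(*pairs))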
| 241 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """Construct a CANINE tokenizer, i.e. a character splitter."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
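# Usage sketch (illustrative): CANINE tokenizes at the character level, so
# every Unicode codepoint is its own token id and no vocabulary file is needed.
#
#     tokenizer = CanineTokenizer()
#     tokenizer("hello")["input_ids"]
#     # [57344, 104, 101, 108, 108, 111, 57345]  ->  [CLS] h e l l o [SEP]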
| 63 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    # Normalize the input to a batch of videos, i.e. a list of lists of frames.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
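# Usage sketch (illustrative; assumes a video supplied as a list of PIL
# frames):
#
#     processor = VideoMAEImageProcessor()
#     inputs = processor(frames, return_tensors="pt")
#     inputs["pixel_values"].shape  # (batch, num_frames, channels, height, width)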
| 423 | 0 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # Merge the smaller component into the larger one.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    """Doctest hook; the module is exercised via doctest.testmod() below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=267_735, cutoffs=[20_000, 40_000, 200_000], d_model=1_024, d_embed=1_024, n_head=16, d_head=64, d_inner=4_096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1_600, clamp_len=1_000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 279 | 0 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
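# Usage sketch (illustrative): model test suites typically instantiate the
# tester in setUp and call run_common_tests(), e.g.
#
#     self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#     self.config_tester.run_common_tests()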
| 474 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Splits a raw CNN/DailyMail story into article lines and summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate if longer, pad if shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encodes the story and summary lines and flattens them into token id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternating 0/1 segment ids for the sentences in each sequence of the batch."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
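# Worked example (added for illustration): a raw CNN/DailyMail story mixes
# article lines with "@highlight"-prefixed summary lines, which process_story
# separates (adding missing final periods along the way):
#
#     raw = "First sentence\nSecond sentence\n@highlight\nThe summary"
#     process_story(raw)
#     # (['First sentence.', 'Second sentence.'], ['The summary.'])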
| 474 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
snake_case = {"""facebook/blenderbot-3B""": 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
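# Note added for clarity: unlike RoBERTa, Blenderbot only appends the EOS
# token to a single sequence (see build_inputs_with_special_tokens above):
#
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#     tokenizer("Hello")["input_ids"][-1] == tokenizer.eos_token_id  # True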
| 568 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 568 | 1 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]]) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sums the currently allocated resources per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Available resources: claim vector minus what is currently allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Need matrix: maximum claim minus currently allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly aligned display of the algorithm's input tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
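# Illustrative run (added; uses the demo tables defined at the top of the
# file and prints the order in which processes can safely execute):
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)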
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    """Translation tool backed by the NLLB-200 checkpoint; plain-English language names are mapped to NLLB codes through LANGUAGE_CODES above."""

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from one language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, "
        "which should be the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian' or 'Albanian'. It returns the text translated into `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="pt" , src_lang=src_lang , tgt_lang=tgt_lang )

    def forward(self, inputs):
        return self.model.generate(**inputs )

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
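# A minimal usage sketch for the tool above (hedged: `setup()` comes from the
# PipelineTool base class and lazily loads the tokenizer and model, and the
# input/output strings are illustrative assumptions, not taken from this file):
#
#     tool = TranslationTool()
#     tool.setup()
#     encoded = tool.encode("bonjour le monde", src_lang="French", tgt_lang="English")
#     generated = tool.forward(encoded)
#     print(tool.decode(generated))  # e.g. "hello world"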
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    '''Loads a vocabulary file and an emoji file into dictionaries.'''
    with open(emoji_file , '''r''' , encoding='''utf-8''' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , '''r''' , encoding='''utf-8''' ) as f:
        token = f.readlines()
    token = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , __lowercase , __lowercase , __lowercase="<|endoftext|>" , __lowercase="<|endoftext|>" , __lowercase="<|startoftext|>" , __lowercase="<|endoftext|>" , __lowercase=False , **__lowercase , ) -> int:
super().__init__(
unk_token=__lowercase , pad_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , do_clean_text=__lowercase , **__lowercase , )
if not os.path.isfile(__lowercase):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
''' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''')
if not os.path.isfile(__lowercase):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''')
__UpperCamelCase :Optional[int] = do_clean_text
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = load_vocab_and_emoji(__lowercase , __lowercase)
__UpperCamelCase :int = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def UpperCamelCase__ ( self) -> Dict:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab)
def UpperCamelCase__ ( self) -> Optional[Any]:
return dict(self.raw_vocab , **self.added_tokens_encoder)
def UpperCamelCase__ ( self , __lowercase) -> Optional[Any]:
return self.subword_tokenizer.tokenize(__lowercase , clean=self.do_clean_text)
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
return self.vocab.get(__lowercase , self.vocab.get(self.unk_token))
def UpperCamelCase__ ( self , __lowercase) -> Union[str, Any]:
return self.subword_tokenizer.convert_id_to_token(__lowercase)
    def UpperCamelCase__ ( self , tokens):
        out_string = ''''''.join(tokens).strip()
        return out_string
    def UpperCamelCase__ ( self , conversation) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def UpperCamelCase__ ( self , save_directory , filename_prefix = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''])
        else:
            vocab_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            emoji_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(vocab_file , '''w''' , encoding='''utf-8''') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''')
                    index = token_index
                writer.write(''','''.join(token) + '''\n''')
                index += 1
        with open(emoji_file , '''w''' , encoding='''utf-8''') as writer:
            json.dump(self.emoji , writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
'''simple docstring'''
    def __init__( self , vocab , ids_to_tokens , emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''')
        self.content_repatter2 = re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''')
        self.content_repatter3 = re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''')
        self.content_repatter4 = re.compile(
            r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''')
        self.content_repatter5 = re.compile(
            r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''')
        self.content_repatter6 = re.compile(
            r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''')
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_trans1 = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks})
def __len__( self) -> Tuple:
return len(self.ids_to_tokens)
    def clean_text( self , content):
        content = self.content_repatter1.sub('''<URL>''' , content)
        content = self.content_repatter2.sub('''<EMAIL>''' , content)
        content = self.content_repatter3.sub('''<TEL>''' , content)
        content = self.content_repatter4.sub('''<DATE>''' , content)
        content = self.content_repatter5.sub('''<DATE>''' , content)
        content = self.content_repatter6.sub('''<PRICE>''' , content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''')
        return content
    def tokenize( self , text , clean=False):
        text = text.replace(''' ''' , '''<SP>''')
        text = text.replace(''' ''' , '''<SP>''')
        text = text.replace('''\r\n''' , '''<BR>''')
        text = text.replace('''\n''' , '''<BR>''')
        text = text.replace('''\r''' , '''<BR>''')
        text = text.replace('''\t''' , '''<TAB>''')
        text = text.replace('''—''' , '''ー''')
        text = text.replace('''−''' , '''ー''')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC_2A1 and c <= 0xC_2BF)
                    or (c >= 0xC_780 and c <= 0xC_783)
                    or (c >= 0xC_AB9 and c <= 0xC_BBF)
                    or (c >= 0xC_C80 and c <= 0xC_DA2)
                ):
                    return True
            return False
        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28_080 and c <= 0xE2B_07F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text) , pos + self.maxlen + 1) if text[pos] == '''<''' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('''<KIGOU>''')
                elif checkuae(wd):
                    result.append('''<U2000U2BFF>''')
                else:
                    for i in wd.encode('''utf-8'''):
                        result.append('''<|byte%d|>''' % i)
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('''utf-8''' , errors='''replace'''))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['''emoji_inv'''][word])
            elif word == "<SP>":
                words.append(''' ''')
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append('''\t''')
            elif word == "<BLOCK>":
                words.append('''▀''')
            elif word == "<KIGOU>":
                words.append('''ǀ''')
            elif word == "<U2000U2BFF>":
                words.append('''‖''')
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('''utf-8''' , errors='''replace'''))
        text = ''''''.join(words)
        return text
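# A minimal usage sketch for the tokenizer above (hedged: loading the checkpoint
# requires network access, and the round-trip claim assumes the input contains no
# symbols outside the vocabulary and emoji tables):
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界")["input_ids"]
#     print(tokenizer.decode(ids))  # decodes back to the input text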
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_s2s_model(
            model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    '''simple docstring'''
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps ) # TODO fix for larger GPU
    else:
        wiki40b_passages , wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    '''simple docstring'''
    eli5 = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages , wiki40b_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
eli5_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    '''simple docstring'''
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def make_support(question , source="wiki40b" , method="dense" , n_results=10 ):
    '''simple docstring'''
    if source == "none":
        support_doc , hit_lst = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wiki40b_passages , wiki40b_gpu_index_flat , n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question , es_client , index_name='''english_wiki40b_snippets_100w''' , n_results=n_results , )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question , support_doc )
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda tensor: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda tokenizer: None),
    } )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    '''simple docstring'''
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1_024 , device='''cuda:0''' , )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('''Enter your question here:''', '''''')
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(''' & ''')
                sections = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = """Wav2Vec2FeatureExtractor"""
    tokenizer_class = """AutoTokenizer"""
def __init__( self :int ,__lowercase :Optional[int] ,__lowercase :Any ):
super().__init__(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.feature_extractor
snake_case__ : Any = False
@classmethod
def __lowerCamelCase ( cls :Union[str, Any] ,__lowercase :Optional[Any] ,**__lowercase :List[str] ):
try:
return super().from_pretrained(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' ,__SCREAMING_SNAKE_CASE ,)
        snake_case__ : List[Any] = Wav2Vec2FeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
        snake_case__ : int = Wav2Vec2CTCTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
return cls(feature_extractor=__SCREAMING_SNAKE_CASE ,tokenizer=__SCREAMING_SNAKE_CASE )
def __call__( self :List[Any] ,*__lowercase :List[str] ,**__lowercase :Optional[int] ):
if self._in_target_context_manager:
return self.current_processor(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
snake_case__ : str = kwargs.pop('''raw_speech''' )
else:
snake_case__ : Optional[int] = kwargs.pop('''audio''' ,__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = kwargs.pop('''sampling_rate''' ,__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = kwargs.pop('''text''' ,__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
snake_case__ : int = args[0]
snake_case__ : str = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
snake_case__ : str = self.feature_extractor(__SCREAMING_SNAKE_CASE ,*__SCREAMING_SNAKE_CASE ,sampling_rate=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if text is not None:
snake_case__ : Tuple = self.tokenizer(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case__ : int = encodings['''input_ids''']
return inputs
def __lowerCamelCase ( self :Union[str, Any] ,*__lowercase :List[str] ,**__lowercase :Optional[Any] ):
if self._in_target_context_manager:
return self.current_processor.pad(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
snake_case__ : str = kwargs.pop('''input_features''' ,__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = kwargs.pop('''labels''' ,__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
snake_case__ : Optional[int] = args[0]
snake_case__ : Dict = args[1:]
if input_features is not None:
snake_case__ : int = self.feature_extractor.pad(__SCREAMING_SNAKE_CASE ,*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if labels is not None:
snake_case__ : Optional[Any] = self.tokenizer.pad(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
snake_case__ : Optional[Any] = labels['''input_ids''']
return input_features
def __lowerCamelCase ( self :Dict ,*__lowercase :List[str] ,**__lowercase :Optional[int] ):
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self :Optional[int] ,*__lowercase :Optional[int] ,**__lowercase :int ):
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self :Optional[Any] ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
snake_case__ : Optional[int] = True
snake_case__ : Optional[Any] = self.tokenizer
yield
snake_case__ : Optional[int] = self.feature_extractor
snake_case__ : Any = False
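# A minimal usage sketch for the processor above (hedged: the checkpoint name and
# the silent 16 kHz waveform are illustrative assumptions, not taken from this file):
#
#     import numpy as np
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     speech = np.zeros(16_000, dtype=np.float32)  # one second of audio at 16 kHz
#     inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
#     print(inputs["input_values"].shape)  # expected: torch.Size([1, 16000])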
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""" , """layers""" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""" , """self_attn""" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""" , """out_proj""" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layer_norm1""" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""" , """layer_norm2""" )
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""" )
    if "text_encoder" in name:
        name = name.replace("""text_encoder""" , """text_model""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """final_layer_norm""" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
    return name
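# Illustrative trace of rename_key on a hypothetical key: the replacements above turn
# "img_encoder.layers.0.blocks.1.norm1.weight" into
# "vision_model.encoder.stages.0.layers.1.layer_norm1.weight".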
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""" )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(""".""" )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    """Copy/paste/tweak the original GroupViT checkpoint weights into the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
    image = prepare_img()
    inputs = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=image , padding=True , return_tensors="""pt""" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.35_23, 6.36_29]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.18_73, 8.62_30]] )
    else:
        raise ValueError(f'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("""Successfully saved processor and model to""" , pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        processor.push_to_hub(model_name , organization="""nielsr""" )
        model.push_to_hub(model_name , organization="""nielsr""" )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 0 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a list of integers sorted in ascending order, return the indices of the
    two numbers that add up to `target`, or an empty list if no such pair exists.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
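# Worked trace for two_pointer([2, 7, 11, 15], 9):
#   i=0, j=3 -> 2 + 15 = 17 > 9, so j moves left
#   i=0, j=2 -> 2 + 11 = 13 > 9, so j moves left
#   i=0, j=1 -> 2 + 7 = 9, return [0, 1]
# The scan runs in O(n) time and O(1) space, but relies on the input being sorted.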
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
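# For reference, the cosine schedule above implements
#     alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# and derives each beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta),
# keeping every beta bounded by max_beta while tracking the cumulative alpha_bar curve.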
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
            SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.float32 )
elif beta_schedule == "linear":
            SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
        SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
            SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
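    # For reference, _convert_to_karras implements the Karras et al. (2022) schedule:
    #     sigma(r) = (sigma_max**(1/rho) + r * (sigma_min**(1/rho) - sigma_max**(1/rho))) ** rho
    # for r ramping from 0 to 1 with rho = 7.0, which spaces noise levels densely near sigma_min.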
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
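    # For reference, step() alternates between a first-order Euler update (storing the
    # derivative and dt) and a second-order correction that averages the two slopes:
    #     x_next = x + dt * (d1 + d2) / 2
    # which is Heun's method applied along the sigma schedule.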
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
            SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
        return self.config.num_train_timesteps
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit and return the histogram of observed states."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
        a_ : Any = UNet2DModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
return unet
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
        a_ : Any = UNet2DModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
return unet
def _lowerCAmelCase ( self , lowerCAmelCase_=False ):
'''simple docstring'''
if class_cond:
a_ : List[str] = self.dummy_cond_unet
else:
a_ : Dict = self.dummy_uncond_unet
# Default to CM multistep sampler
a_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ):
'''simple docstring'''
if str(lowerCAmelCase_ ).startswith("""mps""" ):
a_ : int = torch.manual_seed(lowerCAmelCase_ )
else:
a_ : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
a_ : Union[str, Any] = {
"""batch_size""": 1,
"""num_inference_steps""": None,
"""timesteps""": [22, 0],
"""generator""": generator,
"""output_type""": """np""",
}
return inputs
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Optional[Any] = self.get_dummy_components()
a_ : Any = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : Tuple = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : List[Any] = image[0, -3:, -3:, -1]
a_ : int = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Union[str, Any] = self.get_dummy_components(class_cond=lowerCAmelCase_ )
a_ : Tuple = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : str = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Dict = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : str = 0
a_ : Union[str, Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : int = image[0, -3:, -3:, -1]
a_ : str = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : List[Any] = self.get_dummy_components()
a_ : Optional[int] = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : List[str] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Any = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : List[Any] = 1
a_ : int = None
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : List[Any] = image[0, -3:, -3:, -1]
a_ : List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : str = self.get_dummy_components(class_cond=lowerCAmelCase_ )
a_ : Optional[Any] = ConsistencyModelPipeline(**lowerCAmelCase_ )
a_ : Union[str, Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : int = self.get_dummy_inputs(lowerCAmelCase_ )
a_ : Any = 1
a_ : Optional[int] = None
a_ : Optional[Any] = 0
a_ : str = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
a_ : str = image[0, -3:, -3:, -1]
a_ : int = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _lowerCAmelCase ( self , lowerCAmelCase_=0 , lowerCAmelCase_=False , lowerCAmelCase_="cpu" , lowerCAmelCase_=torch.float16 , lowerCAmelCase_=(1, 3, 64, 64) ):
'''simple docstring'''
a_ : List[str] = torch.manual_seed(lowerCAmelCase_ )
a_ : int = {
"""num_inference_steps""": None,
"""timesteps""": [22, 0],
"""class_labels""": 0,
"""generator""": generator,
"""output_type""": """np""",
}
if get_fixed_latents:
a_ : int = self.get_fixed_latents(seed=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ , shape=lowerCAmelCase_ )
a_ : str = latents
return inputs
def _lowerCAmelCase ( self , lowerCAmelCase_=0 , lowerCAmelCase_="cpu" , lowerCAmelCase_=torch.floataa , lowerCAmelCase_=(1, 3, 64, 64) ):
'''simple docstring'''
if type(lowerCAmelCase_ ) == str:
a_ : Dict = torch.device(lowerCAmelCase_ )
a_ : Dict = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
a_ : Dict = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
return latents
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : List[str] = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Union[str, Any] = self.get_inputs()
a_ : List[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Dict = image[0, -3:, -3:, -1]
a_ : int = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : List[Any] = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : Tuple = self.get_inputs()
a_ : Optional[Any] = 1
a_ : List[str] = None
a_ : Optional[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Any = image[0, -3:, -3:, -1]
a_ : Any = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : Tuple = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : str = self.get_inputs(get_fixed_latents=lowerCAmelCase_ , device=lowerCAmelCase_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase_ , enable_math=lowerCAmelCase_ , enable_mem_efficient=lowerCAmelCase_ ):
a_ : int = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : int = image[0, -3:, -3:, -1]
a_ : Optional[int] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
a_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
a_ : Dict = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a_ : List[str] = self.get_inputs(get_fixed_latents=lowerCAmelCase_ , device=lowerCAmelCase_ )
a_ : str = 1
a_ : Dict = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase_ , enable_math=lowerCAmelCase_ , enable_mem_efficient=lowerCAmelCase_ ):
a_ : Optional[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
a_ : Optional[Any] = image[0, -3:, -3:, -1]
a_ : Optional[int] = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
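# Usage sketch (illustrative, not part of the original test file): running the
# consistency-model checkpoint exercised above outside the test harness. The
# checkpoint name, scheduler settings, and timesteps come from the tests; the
# rest is an assumption.
#
#     from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#     pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#     image = pipe(num_inference_steps=None, timesteps=[22, 0], output_type="np").images[0]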
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5,"
                " transfo_xl, xlm, xlnet]"
            )
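# Usage sketch (illustrative, not part of the original command module); assumes
# the transformers-cli entry point and an existing TF checkpoint on disk:
#
#     transformers-cli convert --model_type bert \
#         --tf_checkpoint ./bert_model.ckpt \
#         --config ./bert_config.json \
#         --pytorch_dump_output ./pytorch_model.bin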
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
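# Usage sketch (illustrative, not part of the original metric file): with
# use_aggregator=False, _compute returns one Score tuple per prediction/reference
# pair instead of bootstrap aggregates.
#
#     import datasets
#
#     rouge = datasets.load_metric("rouge")
#     results = rouge.compute(
#         predictions=["hello there", "general kenobi"],
#         references=["hello there", "general kenobi"],
#         use_aggregator=False,
#     )
#     print(results["rouge1"])  # one Score(precision, recall, fmeasure) per pair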
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles a conv, a norm, and an activation layer.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing decode head, following the UPerNet architecture.
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Fully-convolutional auxiliary head applied to one intermediate feature map.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
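# Usage sketch (illustrative, not part of the original file); assumes the public
# transformers API and the checkpoint listed in the archive map above:
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")
#     logits = model(**inputs).logits        # (batch, num_labels, height, width)
#     seg_map = logits.argmax(dim=1)         # per-pixel class ids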
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
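# Usage sketch (illustrative, not part of the original file): because of the
# _LazyModule indirection above, heavy framework submodules are only imported
# when one of their symbols is first accessed.
#
#     from transformers import BlipProcessor                 # does not pull in the modeling code
#     from transformers import BlipForConditionalGeneration  # triggers the modeling_blip (torch) import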
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
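# Worked example (illustrative, not part of the original tests): with the tester
# default size={"shortest_edge": 18, ...}, a 30 (h) x 60 (w) input keeps its
# aspect ratio, so get_expected_values predicts (18, 36): the short side is
# scaled to 18 and the long side to int(18 * 60 / 30).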
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product obtainable from a contiguous subarray of ``numbers``."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
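# Quick demo (illustrative, not part of the original snippet): the sweep above
# tracks both the running maximum and minimum products so a negative number can
# flip the sign and still yield the best product.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0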
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
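# Minimal usage sketch (illustrative, not part of the original file); assumes the
# class is imported from transformers and that PretrainedConfig resolves generic
# names through attribute_map:
#
#     from transformers import CTRLConfig
#
#     config = CTRLConfig(n_layer=2, n_head=4)
#     assert config.num_hidden_layers == 2              # mapped to n_layer
#     assert config.hidden_size == config.n_embd == 1280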
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """
    Launches a training function, using several processes if it's possible in the current environment (TPU with
    multiple cores for instance).
    """
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """
    Launches a training function using several processes on CPU for debugging purposes.
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
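# Usage sketch (illustrative, not part of the original module); assumes a notebook
# session with two visible GPUs:
#
#     from accelerate import notebook_launcher
#
#     def training_loop(lr, batch_size):
#         ...  # build the Accelerator *inside* this function, never in a cell
#
#     notebook_launcher(training_loop, args=(3e-4, 32), num_processes=2)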
def solution() -> int:
    """
    Return the product a * b * c of the unique Pythagorean triplet
    (a < b < c) for which a + b + c == 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
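# Worked check (illustrative, not part of the original snippet): the only such
# triplet is (200, 375, 425), since 200**2 + 375**2 == 425**2 and the sides sum
# to 1000, so the expected product is 31875000.
if __name__ == "__main__":
    assert solution() == 200 * 375 * 425 == 31_875_000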
"""Project Euler problem 89: rewrite Roman numerals in their minimal form."""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """
    Convert a Roman numeral string to an integer, handling subtractive
    notation (e.g. "IV" -> 4).
    """
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral representation of ``num``."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """
    Return the number of characters saved by rewriting every numeral in the
    input file in its minimal form.
    """
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
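# Quick demo (illustrative, not part of the original solution): round-tripping a
# non-minimal numeral through the two helpers above.
if __name__ == "__main__":
    assert parse_roman_numerals("XIIIIII") == 16  # non-minimal form of 16
    assert generate_roman_numerals(16) == "XVI"   # three characters shorter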
"""Graph coloring by backtracking over an adjacency-matrix representation."""


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if ``color`` does not clash with any already-colored neighbour."""
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex ``index`` and all vertices after it."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring of ``graph`` using at most ``max_colors`` colors, or an empty list."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
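# Quick demo (illustrative, not part of the original snippet): a triangle plus a
# pendant vertex is 3-colorable but not 2-colorable.
if __name__ == "__main__":
    graph = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [0, 0, 1, 0],
    ]
    assert color(graph, 2) == []
    assert color(graph, 3) == [0, 1, 2, 0]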
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053},
] , )
| 65 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4_096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 65 | 1 |
def harmonic_series(n_term: str) -> list:
    """Return the first n_term terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
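# Worked example: harmonic_series("4") returns ['1', '1/2', '1/3', '1/4'],
# i.e. the first four terms of 1 + 1/2 + 1/3 + 1/4.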
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 14 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])  # (1/2, sqrt(3)/2): apex of the unit triangle
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __UpperCAmelCase ( __a : list[numpy.ndarray] ,__a : int ) -> list[numpy.ndarray]:
"""simple docstring"""
_a : Tuple = initial_vectors
for _ in range(__a ):
_a : int = iteration_step(__a )
return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment with four segments forming the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D vector counterclockwise by the given angle in degrees."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
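# rotate() applies the standard 2-D rotation matrix
#     [[cos(theta), -sin(theta)],
#      [sin(theta),  cos(theta)]]
# so, for example, rotate(numpy.array([1, 0]), 90) is approximately [0, 1].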
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the list of vectors describing the Koch snowflake curve."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 14 | 1 |
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(str(opr[x](int(a), int(b))))  # evaluate the 2 popped values & push the result

            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
return int(stack[0] )
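# Illustrative example: solve(["2", "3", "1", "*", "+", "9", "-"]) prints the
# trace table and returns -4, since 2 + (3 * 1) - 9 = -4.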
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print('''\n\tResult = ''', solve(Postfix))
| 305 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
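# The subclass is a thin deprecation alias: constructing ChineseCLIPFeatureExtractor
# emits a FutureWarning once and then behaves exactly like ChineseCLIPImageProcessor.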
| 305 | 1 |